Example #1
    def __init__(self, parent=None):
        QDialog.__init__(self, parent)

        # open and start the local webcam
        self.cam = webcam(640, 480, '/dev/video0')
        self.cam.activate()

        self.statusLabel = QLabel()
        self.quitButton = QPushButton("Quit")
        self.quitButton.setAutoDefault(False)

        buttonLayout = QHBoxLayout()
        buttonLayout.addStretch(1)
        buttonLayout.addWidget(self.quitButton)
        buttonLayout.addStretch(1)

        mainLayout = QVBoxLayout()
        mainLayout.addWidget(self.statusLabel)
        mainLayout.addLayout(buttonLayout)
        self.setLayout(mainLayout)

        self.setWindowTitle('Frame Server')

        # listen on an OS-assigned port; each new client triggers clientConnected
        self.tcpServer = QTcpServer(self)
        self.tcpServer.listen()
        self.client = None

        # pick the first non-loopback IPv4 address to show in the status label
        ipAddress = ''
        for address in QNetworkInterface.allAddresses():
            if address != QHostAddress.LocalHost and address.toIPv4Address():
                ipAddress = address.toString()
                break

        if len(ipAddress) == 0:
            ipAddress = QHostAddress(QHostAddress.LocalHost).toString()

        self.statusLabel.setText(
            'Address:\t\t{}\nPort:\t\t\t{}'.format(ipAddress, self.tcpServer.serverPort()))

        self.quitButton.clicked.connect(self.close)
        self.tcpServer.newConnection.connect(self.clientConnected)
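
A minimal launch sketch for the dialog above, assuming PyQt5; the class name FrameServerDialog is only illustrative and simply wraps the __init__ shown here.

# Launch sketch (assumption: PyQt5; FrameServerDialog is an illustrative name
# for the QDialog subclass whose __init__ is shown above).
import sys
from PyQt5.QtWidgets import QApplication, QDialog

class FrameServerDialog(QDialog):
    pass  # the __init__ above (and its clientConnected slot) would live here

if __name__ == '__main__':
    app = QApplication(sys.argv)
    dialog = FrameServerDialog()
    dialog.show()
    sys.exit(app.exec_())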
Example #2
File: swpi.py  Project: cbolk/swpi
	try:
		#if ( cfg.usedongle ):  log("Signal quality : " + str(modem.get_rssi()))

		if ( cfg.wifi_reset_if_down ) :
			os.system("sudo ./wifi_reset.sh")
		
		# Wait until the 45th second of the minute for the PCE-FWS20 to avoid USB overload
		if (cfg.use_wind_sensor and cfg.sensor_type == "PCE-FWS20"):
			seconds = datetime.datetime.now().second
			if ( seconds < 45 ):
				time.sleep(45-seconds)
		
		waitForHandUP()  # TODO: replace with a lock object
		# WebCam 1
		if ( cfg.webcamDevice1.upper() != "NONE" ):
			webcam1 =  webcam.webcam(1,cfg)
			img1FileName = "./img/webcam1_" + datetime.datetime.now().strftime("%d%m%Y-%H%M%S.jpg") 
			waitForHandUP()
			bwebcam1 = webcam1.capture(img1FileName)
			if ( bwebcam1 ):
				log( "Webcam 1 Captured : ok : "  + img1FileName )
				addTextandResizePhoto(img1FileName,cfg.webcamdevice1finalresolutionX,cfg.webcamdevice1finalresolutionY,cfg,v)
		# WebCam 2
		if ( cfg.webcamDevice2.upper() != "NONE" ):
			webcam2 =  webcam.webcam(2,cfg)
			img2FileName = "./img/webcam2_" + datetime.datetime.now().strftime("%d%m%Y-%H%M%S.jpg")
			waitForHandUP()
			bwebcam2 = webcam2.capture(img2FileName)
			if ( bwebcam2 ):
				log( "Webcam 2 Captured : ok : "  + img2FileName )
				addTextandResizePhoto(img2FileName,cfg.webcamdevice2finalresolutionX,cfg.webcamdevice2finalresolutionY,cfg,v)
Example #3
def main():
    script_desc = 'Interact with ball position control realtime via mouse events'
    args = setup_arg_parser(script_desc)
    vid_conf = args.camera
    maze_conf = args.maze
    vid_settings = read_yaml(vid_conf)
    maze_settings = read_yaml(maze_conf)
    window_name = vid_settings['window_name']

    camera = webcam(vid_settings)
    d = detector(vid_settings, maze_settings)
    c = position_controller(vid_settings, maze_settings)
    file = args.path
    p = path(file, t=5, cycle=True)
    l = logger()

    time_begin = timer()

    n = 10  # exit threshold: the loop below ends once p.n reaches this value

    # Main loop - object detection and labeling for each video frame
    while True:
        frame_time = timer()  # time from when frame was taken

        ### Step 1: Get video frame
        ret, frame = camera.read_frame()
        if not ret:
            print("Error: video frame not loaded.")
            break
        d.frame_count += 1

        start = timer()  # time at which frame was ready

        ### Step 2: crop and transform to get final maze image
        frame, pts = d.crop_and_transform(frame)
        # frame, pts = d.crop_no_transform(frame)

        ### Step 3: detect objects
        d.detect_objects(frame)

        # update PID control (the control magnitude is drawn once in Step 4 below)
        c.process_update(d.ball_pos)

        end = timer()  # time after all calculations were completed

        ### Step 4: Draw detected objects and message text to video frame
        d.annotate_ball(frame)

        # draw table tilt target output where ball is located
        if d.ball_pos is not None:
            draw_magnitude(frame, d.ball_pos, c.output,
                           vid_settings['magnitude_scalar'],
                           color_map['brightorange'])

        # draw error line
        if d.ball_pos is not None and c.target is not None:
            draw_line(frame, (c.target[0], c.target[1]),
                      (d.ball_pos[0], d.ball_pos[1]),
                      BGR_color=color_map['red'])

        display_performance(frame, d.text_tr, d.text_spacing, start, end,
                            frame_time, vid_settings['text_size'])

        if pts is not None:
            draw_corners(frame, pts)

        # update and show path
        if d.ball_pos is not None:
            p.process_update(d.ball_pos)
            c.set_target(p.pts[p.idx])
        p.draw_waypoints(frame, d.ball_pos)

        ### Step 5: Display video on screen
        cv.imshow(window_name, frame)

        ### log for later graphing
        if d.ball_pos is not None and c.target is not None:
            # l.log_new_data(d.ball_pos, c.target, np.round(end - time_begin, 2))
            l.log_new_data(d.ball_pos, c.target, np.round(end - time_begin, 2),
                           d.noisy_ball_pos)

        ### Step 6: Check for key command
        if cv.waitKey(1) == ord('q'):
            break

        if p.n >= n:
            break

    # clean up
    camera.vid.release()
    cv.destroyAllWindows()

    # write logs
    l.write_log()

    # Print statistics to terminal
    print(f'frames captured: {d.frame_count}')
    print(f"Number of frames where ball was missed: {d.missed_frames_ball}")
    print(
        f"Ball detection rate: {np.around((1 - d.missed_frames_ball / d.frame_count), decimals=4) * 100}%"
    )
Example #4
import cv2
import webcam
import time

fourcc = cv2.VideoWriter_fourcc(*'DIVX')
#writer = cv2.VideoWriter('output.avi',fourcc, 20.0, (640, 480), True)
cam = webcam.webcam('http://192.168.0.109:8081')
frames = cam.getFrame()
num = 120
start = time.time()
for frame in frames:
    num = num - 1
    if num == 0:
        end = time.time()
        seconds = end - start
        fps = 120 / seconds
        print("Estimated frames per second : ", fps)
Example #5
import streamlit as st

from webcam import webcam

st.title("Webcam capture component")

st.write("""
- Accesses the user's webcam and displays the video feed in the browser.
- Click the "Capture Frame" button to grab the current video frame and
return it to Streamlit.
""")
captured_image = webcam()
if captured_image is None:
    st.write("Waiting for capture...")
else:
    st.write("Got an image from the webcam:")
    st.image(captured_image)
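
To try this component locally, with Streamlit and the webcam component package installed, save the script (for example as app.py; the filename is arbitrary) and start it with streamlit run app.py; the component renders in the browser and the captured frame comes back to Python as an image that st.image can display.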

Example #6
def main():
    script_desc = 'Interact with ball velocity control realtime via controller joystick'
    args = setup_arg_parser(script_desc)
    vid_conf = args.camera
    maze_conf = args.maze
    vid_settings = read_yaml(vid_conf)
    maze_settings = read_yaml(maze_conf)
    window_name = vid_settings['window_name']

    camera = webcam(vid_settings)
    d = detector(vid_settings, maze_settings)
    c = velocity_controller(vid_settings, maze_settings)
    ps4 = ps4_controller()

    # Main loop - object detection and labeling for each video frame
    while True:
        frame_time = timer()  # time from when frame was taken

        ### Step 1: Get video frame
        ret, frame = camera.read_frame()
        if not ret:
            print("Error: video frame not loaded.")
            break
        d.frame_count += 1

        c.set_target_velocity(ps4.read_joystick())
        start = timer()  # time at which frame was ready

        ### Step 2: crop and transform to get final maze image
        frame, pts = d.crop_and_transform(frame)
        # frame, pts = d.crop_no_transform(frame)

        ### Step 3: detect objects
        if frame is not None:
            d.detect_objects(frame)

        # update PID control
        c.process_update(d.ball_pos, d.kf)

        end = timer()  # time after all calculations were completed

        ### Step 4: Draw detected objects and message text to video frame
        if frame is not None:
            d.annotate_ball(frame)

            if d.ball_pos is not None:
                # draw the table tilt magnitude (PID output) where the ball is located
                draw_magnitude(frame, d.ball_pos, c.output,
                               vid_settings['magnitude_scalar'],
                               color_map['brightorange'])
                # draw the joystick target magnitude at the same position
                draw_magnitude(frame, d.ball_pos, ps4.axis_data,
                               vid_settings['magnitude_scalar'],
                               color_map['green'])

        # draw error line
        # if d.ball_pos and c.target is not None:
        # draw_line(frame, (c.target[0], c.target[1]), (d.ball_pos[0], d.ball_pos[1]), BGR_color=color_map['red'])

        if frame is not None:
            display_performance(frame, d.text_tr, d.text_spacing, start, end,
                                frame_time, vid_settings['text_size'])

        # display mouse event to screen
        # if c.target is not None:
        # draw_circles(frame, [c.target], num=1, BGR_color=color_map['green'])

        ### Step 5: Display video on screen
        if frame is not None:
            cv.imshow(window_name, frame)

        ### Step 6: Check for key command
        if cv.waitKey(1) == ord('q'):
            break

    # clean up
    camera.vid.release()
    cv.destroyAllWindows()

    # Print statistics to terminal
    print(f'frames captured: {d.frame_count}')
    print(f"Number of frames where ball was missed: {d.missed_frames_ball}")
    print(
        f"Ball detection rate: {np.around((1 - d.missed_frames_ball / d.frame_count), decimals=4) * 100}%"
    )