class SensorFactory(GstRtspServer.RTSPMediaFactory):
    def __init__(self, url, **properties):
        super(SensorFactory, self).__init__(**properties)
        self.vs = VideoStream(url).start()
        self.number_frames = 0
        self.fps = 20
        self.duration = 1 / self.fps * Gst.SECOND  # duration of a frame in nanoseconds
        self.shape = (720, 1280)
        self.launch_string = 'appsrc name=source is-live=true block=true format=GST_FORMAT_TIME ' \
                             'caps=video/x-raw,format=BGR,width={},height={} ' \
                             '! videoconvert ! video/x-raw,format=I420 ' \
                             '! x264enc ! queue ' \
                             '! rtph264pay config-interval=1 name=pay0 pt=96'.format(self.shape[1], self.shape[0])

    def on_need_data(self, src, length):
        frame = self.vs.read()
        if frame is None:
            return
        frame = cv2.resize(frame, self.shape[::-1])
        data = frame.tobytes()
        buf = Gst.Buffer.new_allocate(None, len(data), None)
        buf.fill(0, data)
        buf.duration = self.duration
        timestamp = self.number_frames * self.duration
        buf.pts = buf.dts = int(timestamp)
        buf.offset = timestamp
        self.number_frames += 1
        retval = src.emit('push-buffer', buf)
        print('pushed buffer, frame {}, duration {} ns ({} s)'.format(
            self.number_frames, self.duration, self.duration / Gst.SECOND))
        if retval != Gst.FlowReturn.OK:
            print(retval)

    def do_create_element(self, url):
        return Gst.parse_launch(self.launch_string)

    def do_configure(self, rtsp_media):
        self.number_frames = 0
        appsrc = rtsp_media.get_element().get_child_by_name('source')
        appsrc.connect('need-data', self.on_need_data)
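# --- Hedged usage sketch (not from the original snippet) ---
# A minimal example of how a factory like SensorFactory is typically attached to a
# GStreamer RTSP server. The source URL, the '/stream' mount point and the port number
# are placeholders chosen for illustration only.
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
from gi.repository import Gst, GstRtspServer, GLib

if __name__ == '__main__':
    Gst.init(None)
    server = GstRtspServer.RTSPServer()
    server.set_service('8554')                       # serve on rtsp://<host>:8554/stream
    factory = SensorFactory('rtsp://example/source') # placeholder source URL
    factory.set_shared(True)                         # share one pipeline between clients
    server.get_mount_points().add_factory('/stream', factory)
    server.attach(None)
    GLib.MainLoop().run()                            # block and serve forever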
def main():
    vs = VideoStream()
    vs.start()
    names, known_encodings = load_known_faces('./faces/known_faces')
    print(len(known_encodings))

    while vs.isOpened():
        image = vs.read()
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        face_locations = fr.face_locations(image, model='hog')
        img_face_encodings = fr.face_encodings(image, face_locations)
        match_matrix = [fr.compare_faces(known_encodings, f, tolerance=0.6)
                        for f in img_face_encodings]
        print(match_matrix)

        img_with_faces = draw_bbox_on_img(image, face_locations)
        cv2.imshow('frame', img_with_faces)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    vs.close()
    cv2.destroyAllWindows()
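# --- Hedged helper sketch (assumed; the original helpers are not shown) ---
# Plausible implementations of load_known_faces() and draw_bbox_on_img() as used above.
# The directory layout (one image per person, named after that person) is an assumption.
import os
import cv2
import face_recognition as fr

def load_known_faces(directory):
    names, encodings = [], []
    for filename in os.listdir(directory):
        image = fr.load_image_file(os.path.join(directory, filename))
        found = fr.face_encodings(image)
        if found:  # skip images with no detectable face
            names.append(os.path.splitext(filename)[0])
            encodings.append(found[0])
    return names, encodings

def draw_bbox_on_img(image, face_locations):
    # face_recognition returns boxes as (top, right, bottom, left)
    for top, right, bottom, left in face_locations:
        cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
    return image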
from face_decorator import allEAR
from face_decorator import FaceDecorator
from eye_aspect_ratio import EyeAspectRatio
from config import args

vs = VideoStream(args["path"]).start()
workers = [
    GrayConverter(),
    FaceDetector(),
    FaceMarker(args["landmark_model"]),
    EyeAspectRatio(),
    FaceDecorator()
]

winname = args["display"]
graphWin = "Graph"
cv2.namedWindow(winname)
cv2.moveWindow(winname, 200, 300)
cv2.namedWindow(graphWin)
cv2.moveWindow(graphWin, 900, 300)

while True:
    frame = vs.read()
    if frame is None:
        break

    info = {}
    for w in workers:
        w.workon(frame, info)

    cv2.imshow(winname, info["output"])
    cv2.imshow(graphWin, info["graph"])

    key = cv2.waitKey(10) & 0xFF
    if key == ord('q') or key == 27:
        break
    elif key == ord(' '):
        cv2.waitKey(0)

cv2.destroyAllWindows()
vs.stop()
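# --- Hedged worker-interface sketch (assumed; the real classes live in other modules) ---
# Each worker in the pipeline above appears to expose workon(frame, info) and to pass results
# forward through the shared info dict. GrayConverter below is an illustrative guess at that
# contract, not the project's actual implementation.
import cv2

class GrayConverter:
    def workon(self, frame, info):
        # store a grayscale copy for downstream face detection
        info["gray"] = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        info.setdefault("output", frame)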
# For FPS plotting
time_arr = []
fps_arr = []
start_time = time.time()

# for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
try:
    while True:
        bboxes = []

        # Start timer (for calculating frame rate)
        t1 = cv2.getTickCount()

        # Grab frame from video stream
        current_frame = videostream.read()
        input_data = img_to_input(current_frame, model.width, model.height, model.floating_model)

        # Infer model
        boxes, classes, scores = model.infer(input_data)

        max_area = -1
        # Loop over all detections and draw detection box if confidence is above minimum threshold
        for i in range(len(scores)):
            if min_conf_threshold < scores[i] <= 1.0:
                object_name = labels[int(classes[i])]
                if object_name != 'person':
                    continue
                # Get bounding box coordinates and draw box
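# --- Hedged preprocessing sketch (assumed; img_to_input() is not shown in the snippet) ---
# A plausible img_to_input() matching how frames are fed to TFLite detectors elsewhere in
# this document: convert BGR to RGB, resize to the model's input size, add a batch
# dimension and, for a floating-point (non-quantized) model, normalize to roughly [-1, 1].
import cv2
import numpy as np

def img_to_input(frame, width, height, floating_model, input_mean=127.5, input_std=127.5):
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    resized = cv2.resize(rgb, (width, height))
    input_data = np.expand_dims(resized, axis=0)
    if floating_model:
        input_data = (np.float32(input_data) - input_mean) / input_std
    return input_data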
# Initialize video stream
videostream = VideoStream(resolution=(CAMERA_RES[0], CAMERA_RES[1])).start()
time.sleep(1)

# Create list of parking spots
parkingspots = [
    ParkingSpot("PS1", 100, 100, 300, 500),
    ParkingSpot("PS2", 350, 100, 550, 500)
]

while True:
    # Start frame rate timer
    t1 = cv2.getTickCount()

    # Get a snapshot of the video stream, convert it to the expected color space
    # and resize it to the model's input size
    frame = videostream.read()
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame_resized = cv2.resize(frame_rgb, (input_width, input_height))
    input_data = np.expand_dims(frame_resized, axis=0)

    # Perform the object detection by running the model with the image as input
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()

    # Detection results
    boxes = interpreter.get_tensor(output_details[0]['index'])[0]    # Box coordinates: [ymin, xmin, ymax, xmax]
    classes = interpreter.get_tensor(output_details[1]['index'])[0]  # Index of the object class in labels.txt
    scores = interpreter.get_tensor(output_details[2]['index'])[0]   # Confidence score per detection

    tracked_objects = 0
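# --- Hedged ParkingSpot sketch (assumed; the real class is defined elsewhere) ---
# A minimal guess at the ParkingSpot constructor used above: a named rectangle given as
# (xmin, ymin, xmax, ymax) that can report whether a detection's center falls inside it.
# The argument order is an assumption inferred from the calls above.
class ParkingSpot:
    def __init__(self, name, xmin, ymin, xmax, ymax):
        self.name = name
        self.xmin, self.ymin, self.xmax, self.ymax = xmin, ymin, xmax, ymax
        self.occupied = False

    def contains(self, cx, cy):
        # True if the point (cx, cy), e.g. a detection's center, lies inside the spot
        return self.xmin <= cx <= self.xmax and self.ymin <= cy <= self.ymax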
def recognize_video(detector, embedder: Embedder, recognizer: Recognizer,
                    detector_params='default', source=0):
    # initialize the video stream
    print('Starting video stream...')
    vs = VideoStream(src=source).start()

    if not is_detector(detector):
        raise TypeError('Incorrect type of detector')

    # let the camera warm up
    time.sleep(0.5)

    # start the FPS throughput estimator
    fps = FPS().start()

    # loop over frames from the video stream
    while True:
        frame = vs.read()

        if detector_params == 'default':
            faces_roi, boxes = detector.calc_image(frame, return_mode='both')
        elif type(detector) == DetectorSSD:
            confidence = detector_params[0]
            faces_roi, boxes = detector.calc_image(frame, confidence=confidence, return_mode='both')
        elif type(detector) == DetectorVJ or type(detector) == DetectorLBP:
            [scale_factor, min_neighbors] = detector_params
            faces_roi, boxes = detector.calc_image(frame, scale_factor=scale_factor,
                                                   min_neighbors=min_neighbors, return_mode='both')
        elif type(detector) == DetectorHOG or type(detector) == DetectorMMOD:
            upsampling_times = detector_params[0]
            faces_roi, boxes = detector.calc_image(frame, upsampling_times=upsampling_times,
                                                   return_mode='both')

        for i in range(len(faces_roi)):
            embeddings = embedder.calc_face(faces_roi[i])
            name = recognizer.recognize(embeddings)

            start_x, start_y, end_x, end_y = boxes[i]
            text = '{}'.format(name)
            y = start_y - 10 if start_y - 10 > 10 else start_y + 10
            cv2.rectangle(frame, (start_x, start_y), (end_x, end_y), (0, 0, 255), 2)
            cv2.putText(frame, text, (start_x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)

        # update the FPS counter
        fps.update()

        # show the output frame
        cv2.imshow('Frame', frame)
        key = cv2.waitKey(1) & 0xFF

        # quit when 'q' is pressed
        if key == ord("q"):
            break

    fps.stop()
    print('Elapsed time: {:.2f}'.format(fps.elapsed()))
    print('Approx. FPS: {:.2f}'.format(fps.fps()))

    cv2.destroyAllWindows()
    vs.stop()
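# --- Hedged usage example (constructor arguments are assumptions; the real detector,
# --- embedder and recognizer classes come from the project's own modules) ---
# detector = DetectorSSD()
# embedder = Embedder()
# recognizer = Recognizer()
# recognize_video(detector, embedder, recognizer, detector_params=[0.5], source=0)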
def m():
    global output, heatmap_image, total_people, field1_count, field2_count

    frame_rate_calc = 1
    freq = cv2.getTickFrequency()
    videostream = VideoStream(VIDEO_PATH).start()

    color_f1 = (0, 0, 255)
    color_f2 = (255, 0, 0)
    heatmap = np.zeros((720, 1270, 3), dtype=np.uint8)
    ht_color = (191, 255, 1)

    while True:
        t1 = cv2.getTickCount()
        frame1 = videostream.read()
        frame = frame1.copy()

        boxes, classes, scores = generate_detections(frame, interpreter)

        total_people = 0
        field1_count = 0
        field2_count = 0

        for i in range(len(scores)):
            if (scores[i] > THRESHOLD) and (scores[i] <= 1.0):
                ymin = int(max(1, (boxes[i][0] * imH)))
                xmin = int(max(1, (boxes[i][1] * imW)))
                ymax = int(min(imH, (boxes[i][2] * imH)))
                xmax = int(min(imW, (boxes[i][3] * imW)))

                total_people = total_people + 1
                center_x = int((xmin + xmax) / 2)
                center_y = int((ymin + ymax) / 2)
                center_coor = (center_x, center_y)

                color_bb = (10, 200, 10)
                cv2.circle(frame, center_coor, 10, color_bb, cv2.FILLED)

                pts_f1 = [[522, 138], [1066, 522], [1200, 270], [580, 30]]
                pts_f2 = [[172, 142], [410, 607], [657, 440], [319, 142]]
                create_polygon(pts_f1, frame, color_f1)
                create_polygon(pts_f2, frame, color_f2)

                center_point = Point(center_x, center_y)
                polygon_f1 = Polygon(pts_f1)
                polygon_f2 = Polygon(pts_f2)

                if is_field_contain_center(polygon_f1, center_point):
                    field1_count = field1_count + 1
                    color_bb = color_f1
                if is_field_contain_center(polygon_f2, center_point):
                    field2_count = field2_count + 1
                    color_bb = color_f2

                draw_bounding_boxes(frame, classes, xmin, xmax, ymin, ymax, color_bb, labels)

                if (heatmap[center_y, center_x][0] != 0) and (heatmap[center_y, center_x][1] != 0) and (heatmap[center_y, center_x][2] != 0):
                    b = heatmap[center_y, center_x][0]
                    g = heatmap[center_y, center_x][1]
                    r = heatmap[center_y, center_x][2]
                    b = b - b * 0.5
                    g = g - g * 0.2
                    r = r + r * 0.5
                    cv2.circle(heatmap, center_coor, 10, (b, g, r), cv2.FILLED)
                else:
                    cv2.circle(heatmap, center_coor, 10, ht_color, cv2.FILLED)

        cv2.putText(frame, 'FPS: {0:.2f}'.format(frame_rate_calc), (30, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2, cv2.LINE_AA)
        frame = cv2.resize(frame, (698, 396))

        t2 = cv2.getTickCount()
        time1 = (t2 - t1) / freq
        frame_rate_calc = 1 / time1

        overlay = frame1
        alpha_background = 0.7
        alpha_heatmap = 0.9
        cv2.addWeighted(overlay, alpha_heatmap, frame1, 1 - alpha_heatmap, 0, frame1)
        cv2.addWeighted(heatmap, alpha_background, frame1, 1 - alpha_background, 0, frame1)
        frame2 = cv2.resize(frame1, (698, 396))

        output = frame.copy()
        heatmap_image = frame2
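# --- Hedged helper sketch (assumed; these helpers are defined elsewhere in the project) ---
# Plausible versions of create_polygon() and is_field_contain_center() as used above,
# drawing the field outline with OpenCV and testing containment with Shapely.
import cv2
import numpy as np
from shapely.geometry import Point, Polygon

def create_polygon(pts, frame, color):
    # draw the closed field boundary onto the frame
    cv2.polylines(frame, [np.array(pts, dtype=np.int32)], isClosed=True, color=color, thickness=2)

def is_field_contain_center(polygon: Polygon, center: Point) -> bool:
    # True if the detection center lies inside the field polygon
    return polygon.contains(center)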
class Moment(object):
    def __init__(self):
        # renamed from self.stop so the flag does not shadow the stop() method below
        self.stopped = False

        logging.basicConfig()
        self.logger = logging.getLogger("MMNT")
        if DEBUG:
            self.logger.setLevel(logging.DEBUG)
        else:
            self.logger.setLevel(logging.INFO)
        self.logger.info("Initializing")

        self.masterSampleTime = time.time()
        self.slaveSampleTime = time.time()
        self.humanSampleTime = time.time()
        self.micSampleTime = time.time()

        self.logger.debug("Initializing motor control")
        self.mc = MotorControl()
        self.mc.resetMotors()

        self.logger.debug("Initializing microphone")
        dev = usb.core.find(idVendor=0x2886, idProduct=0x0018)
        if not dev:
            sys.exit("Could not find ReSpeaker Mic Array through USB")
        self.mic = Tuning(dev)
        self.mic.write("NONSTATNOISEONOFF", 1)
        self.mic.write("STATNOISEONOFF", 1)
        self.mic.write("ECHOONOFF", 1)

        self.logger.debug("Initializing video streams")
        self.topCamStream = VideoStream(1)
        self.botCamStream = VideoStream(2)

        self.logger.debug("Initializing models")
        self.ht_model = ht.get_model()
        self.tfPose = TfPoseEstimator(get_graph_path(TF_MODEL),
                                      target_size=(VideoStream.DEFAULT_WIDTH, VideoStream.DEFAULT_HEIGHT))
        self.logger.info("Initialization complete")

        self.topCamState = State.IDLE
        self.botCamState = State.IDLE
        self.topCamAngle = 0
        self.topAngleUpdated = False
        self.botCamAngle = 180
        self.botAngleUpdated = False
        self.master = Cams.TOP
        self.lastMaster = Cams.TOP
        self.botCamProc = None
        self.topCamProc = None

        self.audioMap = Map(15)
        self.checkMic()

    def stop(self):
        self.stopped = True

    def updateTopAngle(self, angle):
        if abs(angle - self.topCamAngle) > ANGLE_THRESHOLD and abs(angle - self.botCamAngle) > OVERLAP_THRESHOLD:
            self.topCamAngle = angle
            self.topAngleUpdated = True

    def updateBotAngle(self, angle):
        if abs(angle - self.botCamAngle) > ANGLE_THRESHOLD and abs(angle - self.topCamAngle) > OVERLAP_THRESHOLD:
            self.botCamAngle = angle
            self.botAngleUpdated = True

    def updatePositions(self):
        # Send serial commands
        if self.topAngleUpdated and self.botAngleUpdated:
            self.logger.debug("Top Angle: {}".format(self.topCamAngle))
            self.logger.debug("Bot Angle: {}".format(self.botCamAngle))
            self.topAngleUpdated = False
            self.botAngleUpdated = False
            self.mc.runMotors(self.topCamAngle, self.botCamAngle)
        elif self.topAngleUpdated:
            self.logger.debug("Top Angle: {}".format(self.topCamAngle))
            self.topAngleUpdated = False
            self.mc.runTopMotor(self.topCamAngle)
        elif self.botAngleUpdated:
            self.logger.debug("Bot Angle: {}".format(self.botCamAngle))
            self.botAngleUpdated = False
            self.mc.runBotMotor(self.botCamAngle)

    def isWithinNoiseFov(self, angle):
        topDiff = abs(angle - self.topCamAngle)
        botDiff = abs(angle - self.botCamAngle)

        if topDiff < NOISE_ANGLE_THRESHOLD:
            self.topCamState |= State.NOISE
            if self.topCamState == State.BOTH:
                self.master = Cams.TOP
            return True
        else:
            self.topCamState &= ~State.NOISE

        if botDiff < NOISE_ANGLE_THRESHOLD:
            self.botCamState |= State.NOISE
            if self.botCamState == State.BOTH:
                self.master = Cams.BOT
            return True
        else:
            self.botCamState &= ~State.NOISE

        return False

    def checkMic(self):
        speechDetected, micDOA = self.mic.speech_detected(), self.mic.direction
        if not speechDetected:
            # self.audioMap.update_map_with_no_noise()
            self.topCamState &= ~State.NOISE
            self.botCamState &= ~State.NOISE
            return

        self.logger.debug("speech detected from {}".format(micDOA))
        self.audioMap.update_map_with_noise(micDOA)
        primaryMicDOA, secondaryMicDOA = self.audioMap.get_POI_location()
        if DEBUG:
            self.audioMap.print_map()

        if primaryMicDOA == -1:
            self.logger.debug("no good audio source")
            return

        self.logger.debug("mapped audio from {}".format(primaryMicDOA))
        # Check whether a camera is already looking at the primary noise source
        if self.isWithinNoiseFov(primaryMicDOA):
            # If a camera is already looking, check the secondary noise source
            if secondaryMicDOA == -1:
                self.logger.debug("no good secondary audio source")
                return
            elif self.isWithinNoiseFov(secondaryMicDOA):
                return
            else:
                micDOA = secondaryMicDOA
        else:
            micDOA = primaryMicDOA

        topDiff = abs(micDOA - self.topCamAngle)
        botDiff = abs(micDOA - self.botCamAngle)

        # Neither camera is looking at the noise source at this point.
        # If neither camera is tracking a human, move the closest one.
        if self.topCamState < State.HUMAN and self.botCamState < State.HUMAN:
            if botDiff < topDiff:
                self.botCamState |= State.NOISE
                self.updateBotAngle(micDOA)
                if self.botCamState == State.IDLE:
                    self.master = Cams.TOP
            else:
                self.topCamState |= State.NOISE
                self.updateTopAngle(micDOA)
                if self.topCamState == State.IDLE:
                    self.master = Cams.BOT
        # One camera is on a human; if the other camera is not on a human, move it
        elif self.topCamState < State.HUMAN:
            self.topCamState |= State.NOISE
            self.updateTopAngle(micDOA)
            self.master = Cams.BOT
        elif self.botCamState < State.HUMAN:
            self.botCamState |= State.NOISE
            self.updateBotAngle(micDOA)
            self.master = Cams.TOP
        # Both cameras are on a human
        else:
            # If both are only on a human, move the one that is not master
            if self.topCamState == State.HUMAN and self.botCamState == State.HUMAN:
                if self.master != Cams.BOT:
                    self.botCamState |= State.NOISE
                    self.updateBotAngle(micDOA)
                else:
                    self.topCamState |= State.NOISE
                    self.updateTopAngle(micDOA)
            # One camera is on HUMAN+NOISE; move the one that is only on a HUMAN
            elif self.topCamState == State.HUMAN:
                self.topCamState |= State.NOISE
                self.updateTopAngle(micDOA)
                self.master = Cams.BOT
            elif self.botCamState == State.HUMAN:
                self.botCamState |= State.NOISE
                self.updateBotAngle(micDOA)
                self.master = Cams.TOP

    def getBestFace(self, humans):
        midX = -1
        bestHuman = humans[0]
        maxScore = 0
        for human in humans:
            gotMidX = False
            score = 0
            currMidX = -1
            for part in headParts:
                if part in human.body_parts:
                    score += human.body_parts[part].score
                    if not gotMidX:
                        currMidX = human.body_parts[part].x * VideoStream.DEFAULT_WIDTH
                        gotMidX = True
            if score > maxScore:
                maxScore = score
                midX = currMidX
                bestHuman = human
        return bestHuman, midX

    def checkHumans(self, frame, camera):
        humans = self.tfPose.inference(frame, resize_to_default=True, upsample_size=RESIZE_RATIO)
        if len(humans):
            if camera == Cams.TOP:
                self.topCamState |= State.HUMAN
                if self.topCamState == State.BOTH:
                    self.master = Cams.TOP
            else:
                self.botCamState |= State.HUMAN
                if self.botCamState == State.BOTH:
                    self.master = Cams.BOT

            if DISPLAY_VIDEO and DRAW_ON_FRAME:
                TfPoseEstimator.draw_humans(frame, humans, imgcopy=False)

            human, midX = self.getBestFace(humans)
            if ht.is_hands_above_head(human):
                self.logger.debug("HANDS ABOVE HEAD!!!")

            if midX != -1:
                centerDiff = abs(midX - VideoStream.DEFAULT_WIDTH / 2)
                if centerDiff > FACE_THRESHOLD:
                    if midX < VideoStream.DEFAULT_WIDTH / 2:
                        # rotate CCW
                        if camera == Cams.TOP:
                            self.updateTopAngle((self.topCamAngle + centerDiff * degreePerPixel) % 360)
                        else:
                            self.updateBotAngle((self.botCamAngle + centerDiff * degreePerPixel) % 360)
                    elif midX > VideoStream.DEFAULT_WIDTH / 2:
                        # rotate CW
                        if camera == Cams.TOP:
                            self.updateTopAngle((self.topCamAngle - centerDiff * degreePerPixel) % 360)
                        else:
                            self.updateBotAngle((self.botCamAngle - centerDiff * degreePerPixel) % 360)
        else:
            if camera == Cams.TOP:
                self.topCamState &= ~State.HUMAN
            else:
                self.botCamState &= ~State.HUMAN
        return frame
    def playVideo(self, cam):
        if cam == Cams.TOP:
            if self.botCamProc is not None and self.botCamProc.poll() is not None:
                self.botCamProc.kill()
            self.topCamProc = subprocess.Popen(
                "ffmpeg -f v4l2 -i /dev/video3 -f v4l2 /dev/video5",
                shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        elif cam == Cams.BOT:
            if self.topCamProc is not None and self.topCamProc.poll() is not None:
                self.topCamProc.kill()
            self.botCamProc = subprocess.Popen(
                "ffmpeg -f v4l2 -i /dev/video4 -f v4l2 /dev/video5",
                shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

    def start(self):
        Thread(target=self.run, args=()).start()

    def run(self):
        self.stopped = False
        while not self.stopped:
            try:
                topFrame = self.topCamStream.read()
                botFrame = self.botCamStream.read()

                if time.time() - self.humanSampleTime > HUMAN_SAMPLE_FREQ:
                    if topFrame is not None:
                        topFrame = self.checkHumans(topFrame, Cams.TOP)
                    if botFrame is not None:
                        botFrame = self.checkHumans(botFrame, Cams.BOT)
                    self.humanSampleTime = time.time()

                if time.time() - self.micSampleTime > MIC_SAMPLE_FREQ:
                    self.checkMic()
                    self.micSampleTime = time.time()

                self.updatePositions()

                if DISPLAY_VIDEO and topFrame is not None and botFrame is not None:
                    if self.master == Cams.TOP:
                        top_master = np.concatenate((topFrame, botFrame), axis=1)
                        cv.imshow('Master + Slave', top_master)
                    else:
                        bot_master = np.concatenate((botFrame, topFrame), axis=1)
                        cv.imshow('Master + Slave', bot_master)
                    if cv.waitKey(1) == 27:
                        pass
            except KeyboardInterrupt:
                self.logger.debug("Keyboard interrupt! Terminating.")
                break

        self.mc.resetMotors()
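# --- Hedged entry-point sketch (assumed; the original main script is not shown) ---
# A minimal way the class above might be driven. Whether this VideoStream wrapper needs an
# explicit start() call is an assumption about that class, not something the snippet states.
if __name__ == '__main__':
    moment = Moment()
    moment.topCamStream.start()   # assumed: threaded readers must be started
    moment.botCamStream.start()
    moment.start()                # runs Moment.run() on a background thread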
# Initialize video stream
videostream = VideoStream(resolution=(imW, imH), framerate=30).start()
time.sleep(1)

# Create window
cv2.namedWindow('Object detector', cv2.WINDOW_NORMAL)

# for frame1 in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
while True:
    # Start timer (for calculating frame rate)
    t1 = cv2.getTickCount()

    # Grab frame from video stream
    frame1 = videostream.read()

    # Acquire frame and resize to expected shape [1xHxWx3]
    frame = frame1.copy()
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame_resized = cv2.resize(frame_rgb, (width, height))
    input_data = np.expand_dims(frame_resized, axis=0)

    # Normalize pixel values if using a floating model (i.e. if the model is non-quantized)
    if floating_model:
        input_data = (np.float32(input_data) - input_mean) / input_std

    # Perform the actual detection by running the model with the image as input
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
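# --- Hedged setup sketch (assumed; the interpreter and model variables used above are created elsewhere) ---
# One common way to obtain interpreter, input_details, width/height and floating_model with
# the TensorFlow Lite runtime. The model path 'detect.tflite' is a placeholder.
import numpy as np
from tensorflow.lite.python.interpreter import Interpreter  # or: from tflite_runtime.interpreter import Interpreter

interpreter = Interpreter(model_path='detect.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
floating_model = (input_details[0]['dtype'] == np.float32)
input_mean, input_std = 127.5, 127.5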
def main():
    ''' Main method '''
    global RUNNING

    # Create a socket and connect to the server
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((HOST, PORT))
    PRINT('Connected to ' + ENC_VALUE(HOST + ':' + str(PORT)) + '.', SUCCESS)

    # Connect to the Arduino via serial
    ser = serial.Serial('/dev/ttyACM0', 9600, timeout=1)
    ser.flush()
    cr = '\n'

    # Start the camera video thread
    stream = VideoStream().start()

    last_serial_time = time.time()  # stores the last time a serial value was sent

    while RUNNING:
        # Get the current frame read by the video stream
        try:
            stream_frame = stream.read()
            _, frame = cv2.imencode('.jpg', stream_frame, ENCODE_PARAM)
            # Send data
            send(s, [frame])
        except Exception as e:
            # Print the error
            PRINT(str(e), ERROR)

        # Receive data
        recv_data = recv(s)
        print(recv_data[1])

        # Check if a command was sent
        if recv_data[DATA_IDX_COMMAND] == COMMAND_QUIT:
            # Quit command was received: send a neutral joystick value and stop running
            PRINT('Received command ' + ENC_VALUE(COMMAND_QUIT) + '.', INFO)
            joy_vrt = round(4 * (1 - 0))
            joy_fwd = round(4 * (1 - 0))
            joy_rot = round(4 * (1 + 0))
            submit = str(joy_vrt * 100 + joy_fwd * 10 + joy_rot)
            ser.write(submit.encode('utf-8'))
            ser.write(cr.encode('utf-8'))
            RUNNING = False
        elif time.time() - last_serial_time >= 1:
            if recv_data and len(recv_data[1]) == 3:  # check that the received data is not empty
                print('running' + str(recv_data))
                joy_vrt = round(4 * (1 - recv_data[1][1][3]))
                joy_fwd = round(4 * (1 - recv_data[1][1][1]))
                joy_rot = round(4 * (1 + recv_data[1][1][2]))
                submit = str(joy_vrt * 100 + joy_fwd * 10 + joy_rot)
                print(submit)
                ser.write(submit.encode('utf-8'))
                ser.write(cr.encode('utf-8'))
                line = ser.readline().decode('utf-8').rstrip()
                print(line)
                last_serial_time = time.time()

    s.close()      # Close the socket
    stream.stop()  # Stop the stream
    PRINT('Quit.', SUCCESS)
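# --- Hedged framing sketch (assumed; the real send()/recv() helpers are imported from elsewhere) ---
# One plausible implementation of the send()/recv() pair used above: pickle the payload and
# prefix it with a fixed-size length header so the receiver knows how many bytes to read.
import pickle
import struct

def send(sock, payload):
    data = pickle.dumps(payload)
    sock.sendall(struct.pack('>I', len(data)) + data)

def recv(sock):
    header = sock.recv(4)
    if not header:
        return None
    length = struct.unpack('>I', header)[0]
    data = b''
    while len(data) < length:
        chunk = sock.recv(length - len(data))
        if not chunk:
            break
        data += chunk
    return pickle.loads(data)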
class ClientHandler(threading.Thread):
    def __init__(self, sock, addr):
        threading.Thread.__init__(self)
        self.sock = sock
        self.addr = addr
        self.data_sock = None
        self.streaming = False
        self.completed = False
        self.fnbr = 0

    def run(self):
        self.handle()

    def terminate(self):
        # try to send the EXIT command
        try:
            self.cutil.send(EXIT)
        except:
            # fails if the client already closed the connection
            pass
        self.sock.close()
        # if the stream is live, quit the stream
        if self.streaming:
            self.streaming = False
            self.streamer.join()
        if self.data_sock:
            self.data_sock.close()

    # control socket
    def handle(self):
        util = Util(self.sock)
        self.cutil = util

        # get the video filename
        # TODO: handle unknown filename
        filename = util.recv().decode('ascii')
        cmd = util.recv().decode('ascii')
        if cmd != SETUP:
            self.terminate()
            return
        util.send(OK)

        # Start video capture
        self.vs = VideoStream(filename)

        while True:
            cmd = util.recv().decode('ascii')
            if cmd == EXIT:
                self.terminate()
                return
            elif cmd == PLAY:
                self.streamer = threading.Thread(target=self.start_streaming, args=())
                self.streamer.start()
            elif cmd == HEARTBEAT:
                # TODO: handle the case where no HEARTBEAT is received for a long time
                pass
            if self.completed:
                self.terminate()
                return

    # data socket
    def start_streaming(self):
        self.streaming = True
        print('Started streaming...', self.addr)
        while self.streaming:
            data = self.vs.read()
            if data is None:
                print("Finished streaming...", self.addr)
                self.streaming = False
                break
            try:
                self.data_sock.sendall(data)
            except Exception:
                print('Client closed...')
                self.streaming = False
                self.completed = True
                break
            self.fnbr += 1
            print(f'Sent frame {self.fnbr}', end='\r')
        self.data_sock.close()

    def attach_data_sock(self, sock):
        self.data_sock = sock
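# --- Hedged Util sketch (assumed; the real Util class is imported from another module) ---
# A plausible Util wrapper matching how it is used above: length-prefixed send/recv over the
# control socket, so variable-length commands and filenames arrive intact.
import struct

class Util:
    def __init__(self, sock):
        self.sock = sock

    def send(self, msg):
        data = msg.encode('ascii') if isinstance(msg, str) else msg
        self.sock.sendall(struct.pack('>I', len(data)) + data)

    def recv(self):
        header = self._recv_exact(4)
        length = struct.unpack('>I', header)[0]
        return self._recv_exact(length)

    def _recv_exact(self, n):
        data = b''
        while len(data) < n:
            chunk = self.sock.recv(n - len(data))
            if not chunk:
                raise ConnectionError('socket closed')
            data += chunk
        return data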