def update(self):
    """Process one video frame end-to-end, then re-schedule itself.

    Pipeline per frame: vehicle detection (yolo) -> centroid tracking
    (obj_tracker) -> license-plate localization (localizer) -> OCR (ocr).
    Vehicles that leave the frame get their per-frame OCR guesses
    aggregated into a final plate string, which is appended to the
    results list in the UI. The annotated frame is painted on the
    canvas, and the method re-arms itself via ``window.after`` so it
    runs as a tkinter polling loop.
    """
    # NOTE(review): shared module-level state; some of these names
    # (tracked_vehicle_ids, current_tracked_id) are not used in this
    # method — presumably touched elsewhere, verify before narrowing.
    global frame_no, id_for_new_vehicle, tracked_vehicles_info, \
        tracked_vehicle_ids, current_tracked_id, tracked_vehicles, vehicle

    # No video source loaded yet — keep the polling loop alive.
    if self.vid is None:
        return self.window.after(self.delay, self.update)

    ret, frame = self.vid.get_frame()
    if ret:
        vehicle_ids_this_frame = []
        ocr_outputs_this_frame = []

        # --- object detection ------------------------------------------
        vehicle_imgs, vbbox, bbox = yolo.get_vehicle_imgs(frame, 750, 700)
        if len(vehicle_imgs) == 0 or len(vbbox) == 0 or len(bbox) == 0:
            # Nothing detected: show the raw frame and try again later.
            frame_no += 1
            self._show_frame(frame)
            return self.window.after(self.delay, self.update)

        # --- object tracking -------------------------------------------
        tracked_vehicles_info, id_for_new_vehicle = obj_tracker.track_vehicle(
            obj_tracker.get_bbox_without_class(bbox))

        # Create or update the vehicles tracked till now.
        for key in tracked_vehicles_info:
            _, centroid_x, centroid_y = tracked_vehicles_info[key]
            try:
                v_index = [obj.id for obj in tracked_vehicles].index(key)
                vehicle = tracked_vehicles[v_index]  # already being tracked
            except ValueError:
                vehicle = Vehicle(key)               # first sighting
                tracked_vehicles.append(vehicle)
            vehicle_ids_this_frame.append(vehicle.id)
            vehicle.current_bounding_box_centroid = (centroid_x, centroid_y)

            # Find the vehicle's image in the current frame
            # (vehicle_imgs and vbbox have one-to-one correspondence).
            for v_img, v_bbox in zip(vehicle_imgs, vbbox):
                if obj_tracker.check_exact_match(
                        vehicle.current_bounding_box_centroid[0],
                        vehicle.current_bounding_box_centroid[1],
                        v_bbox):
                    vehicle.img_current = v_img
                    vehicle.bbox_current = v_bbox
                    vehicle.bboxes.append(v_bbox)
                    vehicle.vehicle_imgs.append(v_img)

            # If the vehicle has no corresponding image there is no point
            # processing it further; drop it from the tracked list.
            if vehicle.img_current is None or vehicle.bbox_current is None:
                tracked_vehicles.remove(vehicle)

            # Vehicle left the frame: aggregate its OCR guesses once and,
            # if it yielded a unique non-empty plate, add a result row.
            # (Hoisted to a single call; original evaluated the predicate
            # twice per vehicle.)
            out_of_frame = self.is_vehicle_out_of_frame(vehicle)
            if out_of_frame is None:
                pass  # position unknown — decide on a later frame
            elif out_of_frame and not vehicle.is_completely_processed:
                vehicle.aggregate_ocr()
                print(vehicle.license_number)
                if vehicle.license_number != '' and self.is_vehicle_unique(vehicle):
                    self._create_new_line(vehicle.id)
                    self.vehicle_nos[-1].insert(0, vehicle.id)
                    self.vehicle_lps[-1].insert(0, vehicle.license_number)
                    self.vehicle_nos[-1].config(state='readonly')
                    self.vehicle_lps[-1].config(state='readonly')
                vehicle.is_completely_processed = True

        if len(tracked_vehicles) == 0:
            # No vehicle survived tracking: just skip this frame.
            frame_no += 1
            self._show_frame(frame)
            return self.window.after(self.delay, self.update)

        # --- license-plate localization + OCR --------------------------
        for v in tracked_vehicles:
            if v.id in vehicle_ids_this_frame and not v.is_completely_processed:
                lp_imgs, lp_bboxes, frame = localizer.predict_license_plate(
                    [v.img_current], frame, [v.bbox_current])
                lp_img = lp_imgs[0]
                v.lp_imgs.append(lp_img)
                # OCR expects a single-channel image.
                lp_img = cv2.cvtColor(lp_img, cv2.COLOR_RGB2GRAY)
                ocr_output = ocr.predict_ocr(lp_img)
                v.license_number_predictions.append(
                    ocr_output if ocr_output is not None else '')
                ocr_outputs_this_frame.append(ocr_output)

        # --- draw outputs ----------------------------------------------
        frame = yolo.draw_bbox(frame, bbox)
        for v in tracked_vehicles:
            if v.id in vehicle_ids_this_frame:
                cx = int(v.current_bounding_box_centroid[0])
                cy = int(v.current_bounding_box_centroid[1])
                frame = cv2.putText(frame, f'{v.id}', (cx, cy),
                                    cv2.FONT_HERSHEY_COMPLEX, 4,
                                    (0, 255, 0), 4)
        self._show_frame(frame)
        frame_no += 1

    # Re-schedule even when the frame read failed, so a transient decode
    # error does not permanently freeze the UI loop.
    self.window.after(self.delay, self.update)

def _show_frame(self, frame):
    """Resize *frame* for display, convert BGRA->RGBA and paint it on
    the canvas.

    Keeps the PhotoImage in ``self.photo`` so tkinter's reference does
    not get garbage-collected while the canvas still shows it.
    """
    frame = cv2.resize(frame, (1080, 700))
    frame = cv2.cvtColor(frame, cv2.COLOR_BGRA2RGBA)
    self.photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
    self.canvas.create_image(0, 0, image=self.photo, anchor=tk.NW)