def main():
    """Run the detection/driving loop.

    Connects to the motion-control server on localhost:51000, repeatedly
    sends (motor_power, target_dist) from the detector, and reacts to the
    returned (state, ad_flag) by switching the displayed image and playing
    audio cues on state transitions.  Runs until interrupted.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect(('localhost', 51000))
        detecter = Detection()
        dp = Display()
        ad = Audio()

        # Display loop runs in the background; daemon=True so it does not
        # keep the process alive (Thread.setDaemon() is deprecated).
        thread = Thread(target=dp.start, daemon=True)
        thread.start()

        state_before = 0
        while True:
            motor_power, target_dist = detecter.detect()
            # Send motor power and target distance to the driving program:
            # two big-endian 2-byte unsigned integers.
            sock.sendall(motor_power.to_bytes(2, 'big')
                         + target_dist.to_bytes(2, 'big'))
            # Reply is 4 bytes: 2-byte state + 2-byte audio flag.
            byte_data = sock.recv(4)
            state = int.from_bytes(byte_data[:2], 'big')
            ad_flag = int.from_bytes(byte_data[2:], 'big')

            if state != state_before:
                dp.changeImage(state)
                # Audio cues only on a state transition, and only when the
                # server requested them (ad_flag == 1).
                if ad_flag == 1:
                    if state_before == 0:
                        ad.play(0)   # leaving the idle state
                    elif state == 5:
                        ad.play(1)   # entering state 5
                    elif state_before == 5:
                        ad.play(2)   # leaving state 5
            state_before = state
    finally:
        # Ensure the TCP connection is released even if the loop raises
        # (e.g. KeyboardInterrupt); the original leaked the socket.
        sock.close()
# print(frame.shape) count += 1 if count % 100 == 0: print(basename + " progress: ", count) if lost_frame_count > 20: # Global detector to the rescue img_size = max(frame.shape[0], frame.shape[1]) x_detection = frame.transpose(2, 0, 1) # print(x_detection.shape) detection_result = detector.detect(np.squeeze(x_detection)) # cv2.imwrite("test.jpg", np.squeeze(x_crop).transpose((1,2,0))) dboxes = detection_result["instances"].pred_boxes.tensor.cpu( ).detach().numpy() dscores = detection_result["instances"].scores.cpu().detach( ).numpy() dboxes = dboxes.astype(np.int) if len(dscores) == 0: out.write(frame) continue def window_penalty(dboxes, dscores, center_pos, img_size): new_score = [] for idx in range(len(dscores)):
cv2.circle(frame, (int(tracker.tracker.center_pos[0]), int(tracker.tracker.center_pos[1])), 7, (0, 0, 255), -1) x_crop, scale_z = tracker.get_roi(frame, search_instance_size) # if count == 1338: # cv2.imwrite("result.jpg", np.squeeze(x_crop).transpose(1, 2, 0)) # print(x_crop.shape, scale_z) center_pos = tracker.tracker.center_pos search_region = [(center_pos - search_instance_size/scale_z/2), (center_pos + search_instance_size/scale_z/2)] # print(search_region) drawrect(frame, (int(search_region[0][0]), int(search_region[0][1])), (int(search_region[1][0]), int(search_region[1][1])), (0, 255, 0), 2) # print(x_crop.shape, scale_z) detection_result = detector.detect(np.squeeze(x_crop)) # cv2.imwrite("test.jpg", np.squeeze(x_crop).transpose((1,2,0))) dboxes = detection_result["instances"].pred_boxes.tensor.cpu().detach().numpy() dscores = detection_result["instances"].scores.cpu().detach().numpy() center_pos = tracker.tracker.center_pos # print(tracker.tracker.center_pos, tracker.tracker.size) # print(dboxes, dscores) # print(x_crop.shape) all_outputs = tracker.track(frame, x_crop, scale_z, search_instance_size) # cv2.imwrite("test.jpg", frame) tboxes, tscores = all_outputs['bbox'], all_outputs['best_score'] # print(tboxes, tscores) for idx, dbox in enumerate(dboxes):
required=False, help="Path to the scikit-learn classifier") ap.add_argument("-s", "--scaler", required=False, help="Path to the scikit-learn scaler") args = vars(ap.parse_args()) model_path: str = args.get("model", None) if not model_path: model_path = "./Model/svm.joblib" model = joblib.load(model_path) scaler_path: str = args.get("scaler", None) if not scaler_path: scaler_path = "./Model/svm_scaler.joblib" scaler = joblib.load(scaler_path) image = cv2.imread(args["image"]) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) detect = Detection(model, scaler) # Detected barcode image barcode: np.ndarray = detect.detect(image, window_size=(150, 250), orientation=180, pixels_per_cell=(150, 250), cells_per_block=(1, 1), threshold_proba=0.98, threshold_overlap=0.15)