# Example #1
def main():
    """Preview frames from a FLIR camera until 'q' is pressed."""
    # Instance creation
    cap = EasyPySpin.VideoCapture(0)

    # Checking if it's connected to the camera
    if not cap.isOpened():
        print("Camera can't open\nexit")
        return -1

    # Set the camera parameters
    cap.set(cv2.CAP_PROP_EXPOSURE, -1)  #-1 sets exposure_time to auto
    cap.set(cv2.CAP_PROP_GAIN, -1)  #-1 sets gain to auto

    # Start capturing
    while True:
        ret, frame = cap.read()
        if not ret:
            # Skip failed grabs: resizing/showing a None frame would crash.
            continue
        #frame = cv2.cvtColor(frame, cv2.COLOR_BayerBG2BGR) #for RGB camera demosaicing

        img_show = cv2.resize(frame, None, fx=0.25, fy=0.25)
        cv2.imshow("press q to quit", img_show)
        key = cv2.waitKey(30)
        if key == ord("q"):
            break

    cv2.destroyAllWindows()
    cap.release()
# Example #2
def main():
    """Preview frames while interactively tuning the frame-averaging count.

    Keys: 'k' increments average_num, 'j' decrements it (floor of 1),
    'q' quits.
    """
    cap = EasyPySpin.VideoCaptureEX(0)

    print("Press key to change average number")
    print("k : average_num += 1")
    print("j : average_num -= 1")
    print("--------------------")
    print("average num: ", cap.average_num)

    while True:
        ret, frame = cap.read()

        preview = cv2.resize(frame, None, fx=0.25, fy=0.25)
        cv2.imshow("press q to quit", preview)

        key = cv2.waitKey(30)
        if key == ord("q"):
            break
        if key == ord("k"):
            cap.average_num += 1
            print("average num: ", cap.average_num)
        elif key == ord("j"):
            # Decrement but never drop below one averaged frame.
            cap.average_num = max(1, cap.average_num - 1)
            print("average num: ", cap.average_num)

    cv2.destroyAllWindows()
    cap.release()
# Example #3
def main():
    """Grab NUM_IMAGES frame pairs from two cameras and save them as PNGs."""
    cap0 = EasyPySpin.VideoCapture(0)
    cap1 = EasyPySpin.VideoCapture(1)

    for n in range(NUM_IMAGES):
        ret0, frame0 = cap0.read()
        ret1, frame1 = cap1.read()
        if not (ret0 and ret1):
            # cv2.imwrite would fail on a None frame; skip this round.
            print("Frame grab failed, skipping capture {}".format(n))
            continue

        filename0 = "multiple-{0}-{1}.png".format(n, 0)
        filename1 = "multiple-{0}-{1}.png".format(n, 1)
        cv2.imwrite(filename0, frame0)
        cv2.imwrite(filename1, frame1)
        print("Image saved at {}".format(filename0))
        print("Image saved at {}".format(filename1))
        print()

    cap0.release()
    cap1.release()
# Example #4
    def __init__(self):
        """Parse CLI options, then open and configure the FLIR camera."""
        super().__init__('ImgPublisher')
        self.publisher_ = self.create_publisher(msg.Image, 'FLIR_IMAGE')
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("-i",
                                 "--index",
                                 type=int,
                                 default=0,
                                 help="Camera index (Default: 0)")
        self.parser.add_argument("-e",
                                 "--exposure",
                                 type=float,
                                 default=-3,
                                 help="Exposure time [us] (Default: Auto)")
        self.parser.add_argument("-g",
                                 "--gain",
                                 type=float,
                                 default=-1,
                                 help="Gain [dB] (Default: Auto)")
        self.parser.add_argument("-G",
                                 "--gamma",
                                 type=float,
                                 help="Gamma value")
        self.parser.add_argument("-b",
                                 "--brightness",
                                 type=float,
                                 help="Brightness [EV]")
        self.parser.add_argument("-f",
                                 "--fps",
                                 type=float,
                                 help="FrameRate [fps]")
        self.parser.add_argument(
            "-s",
            "--scale",
            type=float,
            default=1,
            help="Image scale to show (>0) (Default: 1)")  # help text now matches the default
        args = self.parser.parse_args()
        # Open the camera chosen with --index (the index used to be
        # hard-coded to 0, silently ignoring the option).
        self.cap = EasyPySpin.VideoCapture(args.index)
        print("==========CAMERA SETTING==========")
        print("camera idx: ", args.index)
        print("exposure: ", args.exposure)
        print("gain: ", args.gain)
        print("gamma: ", args.gamma)
        print("brightness: ", args.brightness)
        print("frame rate: ", self.cap._get_FrameRate())
        print("scale: ", args.scale)
        self.cap.set(cv2.CAP_PROP_EXPOSURE,
                     args.exposure)  #-1 sets exposure_time to auto
        self.cap.set(cv2.CAP_PROP_GAIN, args.gain)  #-1 sets gain to auto
        self.cap.cam.PixelFormat.SetValue(PySpin.PixelFormat_BayerGB8)

        # Optional properties are only applied when given on the CLI.
        if args.gamma is not None: self.cap.set(cv2.CAP_PROP_GAMMA, args.gamma)
        if args.fps is not None: self.cap.set(cv2.CAP_PROP_FPS, args.fps)
        if args.brightness is not None:
            self.cap.set(cv2.CAP_PROP_BRIGHTNESS, args.brightness)
# Example #5
def main():
    """Preview a hardware-synchronized primary/secondary camera pair.

    'q' quits; 'c' saves a timestamped PNG from each camera.
    """
    cap_primary = EasyPySpin.VideoCapture(0)
    cap_secondary = EasyPySpin.VideoCapture(1)

    # TriggerMode -> On for the primary camera.
    cap_primary.set(cv2.CAP_PROP_TRIGGER, True)
    #import PySpin
    #cap_primary.cam.TriggerSource.SetValue(PySpin.TriggerSource_Software)

    cap_sync = EasyPySpin.SynchronizedVideoCapture(cap_primary, cap_secondary)

    while True:
        ret, frames = cap_sync.read()
        frame_primary, frame_secondary = frames[0], frames[1]

        show_primary = cv2.resize(frame_primary, None, fx=SCALE, fy=SCALE)
        show_secondary = cv2.resize(frame_secondary, None, fx=SCALE, fy=SCALE)
        cv2.imshow("primary", show_primary)
        cv2.imshow("secondary", show_secondary)

        key = cv2.waitKey(1)
        if key == ord("q"):
            break
        if key == ord("c"):
            import datetime
            time_stamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
            filename0 = "synchronized-{0}-{1}.png".format(time_stamp, 0)
            filename1 = "synchronized-{0}-{1}.png".format(time_stamp, 1)
            cv2.imwrite(filename0, frame_primary)
            cv2.imwrite(filename1, frame_secondary)
            print("Image saved at {}".format(filename0))
            print("Image saved at {}".format(filename1))
            print()

    cv2.destroyAllWindows()
    cap_sync.release()
# Example #6
def main():
    """Capture an HDR image via VideoCaptureEX.readHDR and write it as EXR."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-i",
                        "--index",
                        type=int,
                        default=0,
                        help="Camera index (Default: 0)")
    parser.add_argument("-g",
                        "--gain",
                        type=float,
                        default=0,
                        help="Gain [dB] (Default: 0)")
    parser.add_argument("--min",
                        type=float,
                        default=5000,
                        help="Minimum exposure time [us]")
    parser.add_argument("--max",
                        type=float,
                        default=500000,
                        help="Maximum exposure time [us]")
    parser.add_argument("--num",
                        type=int,
                        default=8,
                        help="Number of images to capture")
    parser.add_argument("-o",
                        "--output",
                        type=str,
                        default="capture_hdr.exr",
                        help="Output file name (*.exr)")
    args = parser.parse_args()

    cap = EasyPySpin.VideoCaptureEX(args.index)

    # Gamma must stay 1.0 so pixel values remain linear for HDR merging.
    cap.set(cv2.CAP_PROP_GAMMA, 1.0)
    cap.set(cv2.CAP_PROP_GAIN, args.gain)

    print("Start capturing HDR image")
    ret, img_hdr = cap.readHDR(args.min, args.max, args.num)

    print("Write {}".format(args.output))
    cv2.imwrite(args.output, img_hdr.astype(np.float32))

    # Release the camera handle (was missing: the capture leaked).
    cap.release()
# Example #7
def test_ar_reader() -> bool:
    """Preview the camera and confirm that AR markers are being detected.

    Returns:
        bool: False when 'q' (cancel) is pressed, True when 's' (start)
        is pressed.
    """
    cap = EasyPySpin.VideoCaptureEX(0)
    # cap.set(cv2.CAP_PROP_EXPOSURE, CAM_EXPOSURE_US)
    # cap.set(cv2.CAP_PROP_GAIN, CAM_GAIN)
    # cap.set(cv2.CAP_PROP_GAMMA, 1.0)
    cap.average_num = CAM_AVERAGE

    while True:
        _, frame = cap.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BayerBG2BGR)

        # Detect AR markers; draw accepted (green) and rejected (red) ones.
        corners, ids, rejected = aruco.detectMarkers(frame, ar_dict)
        aruco.drawDetectedMarkers(frame, corners, ids, (0, 255, 0))
        aruco.drawDetectedMarkers(frame, rejected, borderColor=(0, 0, 255))

        # Mark the centre of every detected marker.
        for marker in corners:
            center = marker[0].mean(axis=0).astype(np.int32)
            frame = cv2.circle(frame, tuple(center), 7, (0, 0, 255), -1)

        frame = cv2.resize(frame, None, fx=0.5, fy=0.5)
        cv2.imshow("[q] to quit(cancel), [s] to start calibration.", frame)

        key = cv2.waitKey(50)
        if key in (ord("q"), ord("s")):
            cap.release()
            cv2.destroyAllWindows()
            return key == ord("s")
def choose_run_mode(args):
    """Pick the capture source: a video file, or a FLIR camera / webcam.

    Also rewrites the global ``out_file_path`` with the output video name.
    """
    global out_file_path
    if args.video:
        # Open the video file
        if not os.path.isfile(args.video):
            print("Input video file ", args.video, " doesn't exist")
            sys.exit(1)
        out_file_path = str(out_file_path / (args.video[:-4] + '_tf_out.mp4'))
        return cv.VideoCapture(args.video)

    # FLIR camera input
    cap = EasyPySpin.VideoCapture(1)
    # Webcam input
    #cap = cv.VideoCapture(0)
    # Set the capture resolution (cannot be used with the FLIR camera)
    #cap.set(cv.CAP_PROP_FRAME_WIDTH, cam_width)
    #cap.set(cv.CAP_PROP_FRAME_HEIGHT, cam_height)
    out_file_path = str(out_file_path / 'webcam_tf_out.mp4')
    return cap
# Example #9
def main():
    """Preview a FLIR camera configured from the command line.

    Press 'q' to quit and 'c' to save the current raw frame as
    <timestamp>.png.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-i",
                        "--index",
                        type=int,
                        default=0,
                        help="Camera index (Default: 0)")
    parser.add_argument("-e",
                        "--exposure",
                        type=float,
                        default=-3,
                        help="Exposure time [us] (Default: Auto)")
    parser.add_argument("-g",
                        "--gain",
                        type=float,
                        default=-1,
                        help="Gain [dB] (Default: Auto)")
    parser.add_argument("-G", "--gamma", type=float, help="Gamma value")
    parser.add_argument("-b",
                        "--brightness",
                        type=float,
                        help="Brightness [EV]")
    parser.add_argument("-f", "--fps", type=float, help="FrameRate [fps]")
    parser.add_argument("-s",
                        "--scale",
                        type=float,
                        default=1,
                        help="Image scale to show (>0) (Default: 1)")
    args = parser.parse_args()

    # Open the camera chosen with --index (it was hard-coded to 0 before,
    # silently ignoring the option).
    cap = EasyPySpin.VideoCapture(args.index)
    # Check the connection before querying any camera property.
    if not cap.isOpened():
        print("Camera can't open\nexit")
        return -1

    print("==========CAMERA SETTING==========")
    print("camera idx: ", args.index)
    print("exposure: ", args.exposure)
    print("gain: ", args.gain)
    print("gamma: ", args.gamma)
    print("brightness: ", args.brightness)
    print("frame rate: ", cap._get_FrameRate())
    print("scale: ", args.scale)
    # This print used to emit nothing; report the actual sensor size.
    print("WIDTH, HEIGHT:", cap.get(cv2.CAP_PROP_FRAME_WIDTH),
          cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    cap.set(cv2.CAP_PROP_EXPOSURE,
            args.exposure)  #-1 sets exposure_time to auto
    cap.set(cv2.CAP_PROP_GAIN, args.gain)  #-1 sets gain to auto
    cap.cam.PixelFormat.SetValue(PySpin.PixelFormat_BayerGB8)

    if args.gamma is not None: cap.set(cv2.CAP_PROP_GAMMA, args.gamma)
    if args.fps is not None: cap.set(cv2.CAP_PROP_FPS, args.fps)
    if args.brightness is not None:
        cap.set(cv2.CAP_PROP_BRIGHTNESS, args.brightness)

    while True:
        ret, frame = cap.read()
        if not ret:
            # Skip failed grabs; resizing a None frame would crash.
            continue
        img_show = cv2.resize(frame, None, fx=args.scale, fy=args.scale)
        img_show = cv2.cvtColor(img_show, cv2.COLOR_BayerGB2RGB)
        print(img_show.shape)
        cv2.imshow("capture", img_show)
        key = cv2.waitKey(30)
        if key == ord("q"):
            break
        elif key == ord("c"):
            import datetime
            time_stamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
            filepath = time_stamp + ".png"
            cv2.imwrite(filepath, frame)
            print("Save Image -  > ", filepath)

    cv2.destroyAllWindows()
    cap.release()
# Example #10
# NOTE(review): this chunk is garbled — it fuses two unrelated script
# fragments. The parser.add_argument("-g", ...) call below is truncated
# mid-statement (no closing parenthesis), after which the text jumps into
# a different pose-estimation script. Recover both original sources.
config = tf.ConfigProto(gpu_options=gpu_options)
config.gpu_options.allow_growth = True

# YOLOv3 tensor names to fetch from the frozen graph.
return_elements = [
    "input/input_data:0", "pred_sbbox/concat_2:0", "pred_mbbox/concat_2:0",
    "pred_lbbox/concat_2:0"
]
pb_file = "./yolov3_coco.pb"
#video_path      = "./docs/images/road.mp4"
video_path = 1
num_classes = 80
input_size = 416
graph = tf.Graph()
return_tensors = utils.read_pb_return_tensors(graph, pb_file, return_elements)

cap = EasyPySpin.VideoCapture(0)
parser = argparse.ArgumentParser()
parser.add_argument("-i",
                    "--index",
                    type=int,
                    default=0,
                    help="Camera index (Default: 0)")
parser.add_argument("-e",
                    "--exposure",
                    type=float,
                    default=-3,
                    help="Exposure time [us] (Default: Auto)")
parser.add_argument("-g",
                    "--gain",
                    type=float,
                    default=-1,
# NOTE(review): the add_argument call above is cut off here — everything
# below belongs to another script.
# Try mobilenet_thin
#estimator = load_pretrain_model('mobilenet_thin')
#estimator = load_pretrain_model('mobilenet_small')
estimator = load_pretrain_model('VGG_origin')
action_classifier = load_action_premodel('Action/own_stand_wave_08.h5')

# Parameter initialization
realtime_fps = '0.0000'
start_time = time.time()
fps_interval = 1
fps_count = 0
run_timer = 0
frame_count = 0

# Get the controlled (receptor) camera
cap_Receptor = EasyPySpin.VideoCapture(0)

# Get the main (controlling) camera
cap_main = choose_run_mode(args)

# Read/write the video file
video_writer = set_video_writer(cap_main, write_fps=int(7.0))
# print("DEBUG:stage 1")

# # Save txt files with several sets of joint data for the training
# # process; rename them to wave_*.txt / stand_*.txt
# f = open('test_out/origin_data.txt', 'a+')

while cv.waitKey(1) < 0:
    #print("DEBUG:stage 2")
    has_frame, show = cap_main.read()
# Example #12
def main():
    """Bracket exposures on a FLIR camera and merge them into an HDR EXR."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-i",
                        "--index",
                        type=int,
                        default=0,
                        help="Camera index (Default: 0)")
    parser.add_argument("-g",
                        "--gain",
                        type=float,
                        default=0,
                        help="Gain [dB] (Default: 0)")
    parser.add_argument("--min",
                        type=float,
                        default=50000,
                        help="Minimum exposure time [us]")
    parser.add_argument("--max",
                        type=float,
                        default=500000,
                        help="Maximum exposure time [us]")  # was mislabelled "Minimum"
    parser.add_argument("--num",
                        type=int,  # was float; np.geomspace needs an integer count
                        default=5,
                        help="Number of images to capture")
    parser.add_argument("-o",
                        "--output",
                        type=str,
                        default="hdr.exr",
                        help="Output file name (*.exr)")
    args = parser.parse_args()

    cap = EasyPySpin.VideoCapture(args.index)

    # Gamma 1.0 keeps the sensor response linear for HDR merging.
    cap.set(cv2.CAP_PROP_GAMMA, 1.0)
    cap.set(cv2.CAP_PROP_GAIN, args.gain)

    # Exposure times spaced evenly on a log scale.
    times_us = np.geomspace(args.min, args.max, num=args.num).tolist()

    print("Start capturing")
    images = []
    for i, time_to_set in enumerate(times_us):
        cap.set(cv2.CAP_PROP_EXPOSURE, time_to_set)

        # Ignore first frame (it may have been taken before the exposure
        # time change was applied).
        _ = cap.read()

        ret, frame = cap.read()
        images.append(frame)

        print("Image {0}/{1}".format(i + 1, args.num))
        print("    ExposureTime: {}\n".format(time_to_set))

    cap.release()
    print("End capturing\n")

    print("Merge HDR")
    times = [t * (1e-6) for t in times_us]  #us -> s
    img_hdr = merge_hdr(images, times)

    print("Write {}".format(args.output))
    cv2.imwrite(args.output, img_hdr.astype(np.float32))
# Example #13
def main():
    """Show live frames from a FLIR camera, configured via the command line.

    Press 'q' to quit and 'c' to save the current frame as <timestamp>.png.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--index", type=int, default=0,
                        help="Camera index (Default: 0)")
    parser.add_argument("-e", "--exposure", type=float, default=-1,
                        help="Exposure time [us] (Default: Auto)")
    parser.add_argument("-g", "--gain", type=float, default=-1,
                        help="Gain [dB] (Default: Auto)")
    parser.add_argument("-G", "--gamma", type=float, help="Gamma value")
    parser.add_argument("-b", "--brightness", type=float,
                        help="Brightness [EV]")
    parser.add_argument("-f", "--fps", type=float, help="FrameRate [fps]")
    parser.add_argument("-s", "--scale", type=float, default=0.25,
                        help="Image scale to show (>0) (Default: 0.25)")
    args = parser.parse_args()

    cap = EasyPySpin.VideoCapture(args.index)
    if not cap.isOpened():
        print("Camera can't open\nexit")
        return -1

    # Negative exposure/gain values select the camera's auto mode.
    cap.set(cv2.CAP_PROP_EXPOSURE, args.exposure)
    cap.set(cv2.CAP_PROP_GAIN, args.gain)
    if args.gamma is not None:
        cap.set(cv2.CAP_PROP_GAMMA, args.gamma)
    if args.fps is not None:
        cap.set(cv2.CAP_PROP_FPS, args.fps)
    if args.brightness is not None:
        cap.set(cv2.CAP_PROP_BRIGHTNESS, args.brightness)

    while True:
        ret, frame = cap.read()

        preview = cv2.resize(frame, None, fx=args.scale, fy=args.scale)
        cv2.imshow("capture", preview)
        key = cv2.waitKey(30)
        if key == ord("q"):
            break
        if key == ord("c"):
            import datetime
            stamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
            filepath = stamp + ".png"
            cv2.imwrite(filepath, frame)
            print("Export > ", filepath)

    cv2.destroyAllWindows()
    cap.release()
# Example #14
def get_corresponds() -> NDArray[(Any, 6), np.float64]:
    """Move the 4-axis stage to collect AR-marker/stage correspondences.

    Returns:
        ndarray: n*6 array. Each row holds the AR marker ID (col 0), the
                 marker centre in image coordinates (cols 1:3) and the
                 stage position (cols 3:6).
    """
    # Sanity-check the configured motion ranges.
    if not (0 <= PAN_ROTATE_RANGE <= 180):
        print("invalid `PAN_ROTATE_RANGE`")
    if not (0 <= TILT_ROTATE_RANGE <= 90):
        print("invalid `TILT_ROTATE_RANGE`")  # was wrongly reporting PAN
    # NOTE(review): this third check re-validates PAN_ROTATE_RANGE with
    # different bounds; it may have been meant for another constant — confirm.
    if not (0 < PAN_ROTATE_RANGE <= 360):
        print("invalid `PAN_ROTATE_RANGE`")

    stage = Aries()

    # In fixed-U mode (`USE_U_AXIS=False`), make sure the camera can see
    # the material beforehand.
    if not USE_U_AXIS:
        pos = list(stage.position)
        if -10 < pos[3] < 10:
            u = -10
            pos[3] = u
            stage.position = pos
        else:
            u = pos[3]

    # Initial camera setup
    cap = EasyPySpin.VideoCaptureEX(0)
    # cap.set(cv2.CAP_PROP_EXPOSURE, CAM_EXPOSURE_US)
    # cap.set(cv2.CAP_PROP_GAIN, CAM_GAIN)
    # cap.set(cv2.CAP_PROP_GAMMA, 1.0)
    cap.average_num = CAM_AVERAGE

    # Randomly choose the stage positions to visit.
    ## 1st half: upper tilt range, 2nd half: lower tilt range
    xyzs_1st_half = np.random.rand((TRY_XYZS + 1) // 2, 3)
    xyzs_1st_half[:, 0] = (xyzs_1st_half[:, 0] * PAN_ROTATE_RANGE -
                           PAN_ROTATE_RANGE / 2)
    xyzs_1st_half[:, 1] = xyzs_1st_half[:, 1] * TILT_ROTATE_RANGE / 2 + (
        90 - TILT_ROTATE_RANGE / 2)
    xyzs_1st_half[:, 2] = xyzs_1st_half[:, 2] * 45
    ## Sort by pan
    xyzs_1st_half = xyzs_1st_half[np.argsort(xyzs_1st_half[:, 0])]

    xyzs_2nd_half = np.random.rand(TRY_XYZS // 2, 3)
    xyzs_2nd_half[:, 0] = (xyzs_2nd_half[:, 0] * PAN_ROTATE_RANGE -
                           PAN_ROTATE_RANGE / 2)
    xyzs_2nd_half[:, 1] = xyzs_2nd_half[:, 1] * TILT_ROTATE_RANGE / 2 + (
        90 - TILT_ROTATE_RANGE)
    xyzs_2nd_half[:, 2] = xyzs_2nd_half[:, 2] * 45
    ## Sort by pan, descending
    xyzs_2nd_half = xyzs_2nd_half[np.argsort(xyzs_2nd_half[:, 0])[::-1]]

    xyzs = np.concatenate((xyzs_1st_half, xyzs_2nd_half))

    # Round for the Aries stage's positioning precision
    # (the original note said 3 decimals; the code rounds to 2 — kept as-is).
    xyzs = xyzs.round(decimals=2)

    corresponds = []
    schedule = tqdm(xyzs)
    for xyz in schedule:
        if USE_U_AXIS:
            # Pick a U that should give a good view for this xyz.
            if xyz[0] >= 0:
                u = np.clip(xyz[0], 10, 80)
            else:
                u = np.clip(xyz[0], -80, -10)

        schedule.set_description(f"[Valid points: {len(corresponds):3d}, " +
                                 f"X: {xyz[0]:3.0f}, Y: {xyz[1]:2.0f}, " +
                                 f"Z: {xyz[2]:3.0f}, U: {u:3.0f}]")

        stage.position = (*xyz, u)
        stage.sleep_until_stop()
        sleep(1)

        _, frame = cap.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BayerBG2BGR)

        # Detect AR markers.
        # corners: list[NDArray[(markers sharing an ID, 4, 2), np.float32]]
        # ids: Optional[NDArray[(number of detected IDs, 1), np.int32]]
        corners, ids, _ = aruco.detectMarkers(frame, ar_dict)

        # Nothing detected -> next position (also avoids
        # "TypeError: 'NoneType' object is not iterable" from ids=None).
        if len(corners) == 0:
            continue

        # For each detected AR marker
        for ar_corner, ar_id in zip(corners, ids):
            # Ignore unexpected marker IDs.
            if ar_id[0] >= len(_AR_ID_TO_WORLD_XYZ_40X40):
                continue

            # Marker centre in image coordinates.
            ar_center = ar_corner[0].mean(axis=0)

            # Record the correspondence row: [id, cx, cy, x, y, z].
            corresponds.append(np.concatenate([ar_id, ar_center, xyz]))

    cap.release()
    del stage

    corresponds = np.array(corresponds)
    print("Valid points:", len(corresponds))
    return corresponds
# Example #15
def main():
    """Interactive calibration preview for camera position and exposure.

    Shows the perspective-corrected view next to the raw camera view with
    guide lines and the stage-estimated corner points, and continuously
    prints the darkest/brightest RGB of the warped image. 'q' quits.

    Returns:
        int: 1 when the camera fails to open, 0 on normal exit.
    """
    cap = EasyPySpin.VideoCapture(0)
    if not cap.isOpened():
        print("Calib-Cam-Pos: Camera device error.")
        return 1

    cam_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    cam_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Assumes a landscape sensor (cam_width > cam_height)
    left_border = (cam_width - cam_height) // 2
    right_border = cam_width - left_border
    max_object_size = int(cam_height / 2**0.5)
    top_border = (cam_height - max_object_size) // 2
    bottom_border = cam_height - top_border
    # Destination corners (LT, RT, LB, RB) for the perspective warp.
    dst_imgpoints = np.array(
        ((0, 0), (IMG_SIZE[0], 0), (0, IMG_SIZE[1]),
         (IMG_SIZE[0], IMG_SIZE[1])),
        dtype=np.float32,
    )

    cam_mat = np.load(NPY_FILENAME_FOR_CAMERA_MATRIX)

    stage = Aries()

    while True:
        rot = calib_utils.rot_matrix_from_pan_tilt_roll(*stage.position[0:3])

        _, frame = cap.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BayerBG2BGR)

        # Show the perspective-corrected image
        # (for tuning `WORLD_LT_RT_LB_RB` and the exposure).
        world_lt_rt_lb_rb = rot @ np.array(WORLD_LT_RT_LB_RB).T
        camera_lt_rt_lb_rb = calib_utils.wrap_homogeneous_dot(
            cam_mat, world_lt_rt_lb_rb.T).astype(np.float32)
        img_to_img_mat = cv2.getPerspectiveTransform(camera_lt_rt_lb_rb,
                                                     dst_imgpoints)
        frame_perspectived = cv2.warpPerspective(frame,
                                                 img_to_img_mat,
                                                 IMG_SIZE,
                                                 flags=cv2.INTER_CUBIC)
        cv2.imshow(f"perspective", frame_perspectived)

        # Show the raw camera image (for tuning the camera position)
        frame_preview = frame.copy()

        ## Centre guide lines
        cv2.line(
            frame_preview,
            (left_border, top_border),
            (right_border, top_border),
            (255, 255, 255),
            5,
        )
        cv2.line(
            frame_preview,
            (left_border, bottom_border),
            (right_border, bottom_border),
            (255, 255, 255),
            5,
        )

        ## Maximum material size guide lines
        cv2.line(
            frame_preview,
            (left_border, 0),
            (right_border, cam_height),
            (255, 255, 255),
            5,
        )
        cv2.line(
            frame_preview,
            (right_border, 0),
            (left_border, cam_height),
            (255, 255, 255),
            5,
        )

        ## Image corner points estimated from the stage pose.
        ## NOTE(review): the original comment said "top-right in red", but
        ## the code draws the top-LEFT (camera_lt) in red — confirm intent.
        camera_lt_rt_lb_rb = camera_lt_rt_lb_rb.astype(np.int32)
        camera_lt = tuple(camera_lt_rt_lb_rb[0])
        camera_rt = tuple(camera_lt_rt_lb_rb[1])
        camera_lb = tuple(camera_lt_rt_lb_rb[2])
        camera_rb = tuple(camera_lt_rt_lb_rb[3])
        cv2.circle(frame_preview, camera_lt, 20, (0, 0, 255), 5)
        cv2.circle(frame_preview, camera_rt, 20, (255, 0, 0), 5)
        cv2.circle(frame_preview, camera_lb, 20, (255, 0, 0), 5)
        cv2.circle(frame_preview, camera_rb, 20, (255, 0, 0), 5)

        frame_preview = cv2.resize(frame_preview,
                                   None,
                                   fx=VIEW_SCALE,
                                   fy=VIEW_SCALE)
        cv2.imshow("camera", frame_preview)

        # Darkest and brightest RGB values of the warped image
        # (information for exposure tuning).
        # NOTE(review): assumes a 3-channel BGR frame — confirm.
        flatten_bgr = frame_perspectived.T.reshape((3, -1))
        min_bgr = flatten_bgr.min(axis=1)
        max_bgr = flatten_bgr.max(axis=1)
        # "\r\033[1A" moves the cursor back up so the two lines refresh
        # in place on each iteration.
        print(
            f"  darkest RGB: ({min_bgr[2]:3d},{min_bgr[1]:3d},{min_bgr[0]:3d})",
            end="\n",
        )
        print(
            f"brightest RGB: ({max_bgr[2]:3d},{max_bgr[1]:3d},{max_bgr[0]:3d})",
            end="\r\033[1A",
        )

        key = cv2.waitKey(25)
        if key == ord("q"):
            break

    cv2.destroyAllWindows()
    cap.release()
    print("\n\n")
    return 0