Code example #1 (score: 0)
File: minimal.py — Project: stjordanis/modeld
    # NOTE(review): tail of a function whose signature lies before this view.
    # Packs planar YUV420 frames into a 6-channel, half-resolution tensor:
    # channels 0-3 subsample the Y plane in a 2x2 pattern, channels 4-5 are
    # the U and V chroma planes. Presumably `frames` is (N, H * 3 // 2, W)
    # I420 data and `in_img1` is (N, 6, H // 2, W // 2) — TODO confirm
    # against the missing function header.
    in_img1[:, 0] = frames[:, 0:H:2, 0::2]  # Y: even rows, even cols
    in_img1[:, 1] = frames[:, 1:H:2, 0::2]  # Y: odd rows, even cols
    in_img1[:, 2] = frames[:, 0:H:2, 1::2]  # Y: even rows, odd cols
    in_img1[:, 3] = frames[:, 1:H:2, 1::2]  # Y: odd rows, odd cols
    # U plane: rows H .. H + H//4, reshaped to half resolution.
    in_img1[:, 4] = frames[:, H:H + H // 4].reshape((-1, H // 2, W // 2))
    # V plane: rows H + H//4 .. H + H//2, reshaped the same way.
    in_img1[:, 5] = frames[:, H + H // 4:H + H // 2].reshape(
        (-1, H // 2, W // 2))
    return in_img1


# Warp every input image into the "medmodel" camera frame the SuperCombo
# network expects: 384x512 uint8 YUV per frame (a 512x256 output size).
imgs_med_model = np.zeros((len(imgs), 384, 512), dtype=np.uint8)
# Pass total= so tqdm can render a real progress bar — enumerate() alone
# has no __len__, which left the bar without a percentage/ETA.
for i, img in tqdm(enumerate(imgs), total=len(imgs)):
    imgs_med_model[i] = transform_img(img,
                                      from_intr=eon_intrinsics,
                                      to_intr=medmodel_intrinsics,
                                      yuv=True,
                                      output_size=(512, 256))
# Pack frames into the model's 6-channel tensor layout and normalize
# uint8 [0, 255] -> float32 in [-1, 1).
frame_tensors = frames_to_tensor(np.array(imgs_med_model)).astype(
    np.float32) / 128.0 - 1.0

from tensorflow.keras.models import load_model

supercombo = load_model('supercombo.keras')
# BUG FIX: Model.summary() prints the table itself and returns None, so the
# original print(supercombo.summary()) emitted a spurious trailing "None".
supercombo.summary()

# Just passing zeros for desire and the recurrent state input.
poses = []
state = np.zeros((1, 512))
## Saving lane data
left_lane = []
right_lane = []
Code example #2 (score: 0)
File: drive.py — Project: kaishijeng/SuperDrive
    # NOTE(review): interior of a per-frame processing loop/function whose
    # header lies outside this view; `undist`, `fr1`, `frame`, and the
    # intrinsics come from the enclosing scope — confirm against the full
    # file before relying on these notes.
    # In this case, a Logitech C920 is used (default for undistortion helper).
    # Just perform chessboard calibration to get the matrices!
    frame = undist.frame(frame)

    # Crop to 256 rows x 512 cols — the (512, 256) (width, height) input
    # size the SuperCombo model uses. The crop is biased toward the sky
    # because this camera can "see" the hood, which doesn't help lane
    # detection, so that region is cropped out.
    frame = frame[14:270, 24:536]

    # Convert BGR -> planar YUV 4:2:0 (I420); for a 256x512 input the
    # result is a (384, 512) uint8 image (Y plane plus stacked U/V planes).
    frameYUV = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV_I420)

    # Use Comma's transformation to warp into the camera frame SuperCombo
    # was trained on, then normalize uint8 [0, 255] -> float32 in [-1, 1).
    frameYUV = transform_img(frameYUV, from_intr=eon_intrinsics,
                             to_intr=medmodel_intrinsics, yuv=True,
                             output_size=(512, 256)).astype(np.float32) \
        / 128.0 - 1.0

    # Shift the frame pair: the previous frame (fr1) moves to fr0 and the
    # current frame becomes fr1, so the network always sees two
    # consecutive frames.
    fr0 = fr1
    fr1 = frameYUV

    # SuperCombo input shape is (12, 128, 256): two consecutive images
    # in YUV space (6 channels each). We concatenate fr0 and fr1 together
    # to get to that.
    networkInput = np.concatenate((fr0, fr1))

    # Add the batch dimension: (1, 12, 128, 256) as the network requires.
    networkInput = networkInput.reshape((1, 12, 128, 256))