Example #1
import os
import pickle

import cv2
import numpy as np


def run_convbase_on_images(src_folder, dst_file, image_shape, force_all=False):
    """
    Run the imported convolutional base on the cropped images.
    Save the result in a file.

    Parameters
    ----------
    src_folder (str):
        The folder containing the cropped images.
    dst_file (str):
        The destination file for the images after they have been processed by the convolutional base.
    image_shape (tuple):
        The input image shape expected by the convolutional base.
    force_all (bool), default False:
        If False, process only images which are present in src_folder but not yet stored in dst_file.
        If True, ignore any previously stored results and process everything in src_folder.

    Returns
    -------
    XData
        The collection of processed images (also saved to dst_file).
    """

    if force_all:
        xd = XData()
    else:
        try:
            with open(dst_file, "rb") as f:
                xd = pickle.load(f)
        except FileNotFoundError:
            xd = XData()

    src_images = os.listdir(src_folder)
    dst_images = xd.get_stored_filenames()
    missing_in_dst = list(set(src_images).difference(set(dst_images)))
    convbase = get_VGG16_convbase(image_shape)

    for i, filename in enumerate(missing_in_dst):
        print(f"{i + 1}/{len(missing_in_dst)}: Processing {filename}")
        src_path = os.path.join(src_folder, filename)
        im = cv2.imread(src_path)
        im = normalize_image(im)

        single_image_batch = np.array([im])
        after_convbase_single = convbase.predict(single_image_batch)
        after_convbase_single = after_convbase_single.flatten()

        xd.add_image(filename, after_convbase_single, assert_dimensions=1)

        if i % 100 == 0:
            print("Saving to avoid losing work if interrupted...")
            save_data(xd, dst_file)

    save_data(xd, dst_file)

    return xd
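A minimal usage sketch for the function above; the paths and the image shape below are placeholder assumptions, not values from the original example, and the helpers (XData, get_VGG16_convbase, normalize_image, save_data) are assumed to be defined in the surrounding project.

if __name__ == "__main__":
    # Hypothetical call: paths and the VGG16 input shape are placeholder assumptions.
    run_convbase_on_images(
        src_folder="data/cropped",         # assumed folder of cropped input images
        dst_file="data/features.pickle",   # assumed pickle file holding the XData store
        image_shape=(224, 224, 3),         # assumed VGG16 input shape (height, width, channels)
        force_all=False,                    # only process images not already stored
    )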
Example #2
# Split off the training arrays (x_test / z_test are assumed to be defined elsewhere in the script)
x_train = x[n_train:]
z_train = z[n_train:]

# ---- Test and visualize ---- #
x_old = x_test[:hist, ...].copy()
frames = []
obs_frames = []
state_frames = []
pf_frames = []
direct_frames = []
for t in range(0 + hist, n_test - 1):
    z_new = z_test[t].copy()
    z_new_test = z_test[t].copy()
    x_new = x_test[t].copy()
    x_hat_pf = pf.step(z_new)
    cv2.imshow("real", normalize_image(x_new))
    cv2.imshow("pf", normalize_image(x_hat_pf))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    obs_frames.append(add_border(normalize_image(z_new)))
    state_frames.append(add_border(normalize_image(x_new)))
    pf_frames.append(add_border(normalize_image(x_hat_pf)))
cv2.destroyAllWindows()

# ---- Saves multiple samples as an image ---- #
idxs = np.arange(15, 55, 4, dtype=np.int16)
obs_img = np.concatenate(tuple(np.array(obs_frames)[idxs]), axis=1)
state_img = np.concatenate(tuple(np.array(state_frames)[idxs]), axis=1)
pf_img = np.concatenate(tuple(np.array(pf_frames)[idxs]), axis=1)
full_img = np.concatenate((obs_img, state_img, pf_img),
                          axis=0).astype(np.uint8)
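Example #2 stops after assembling full_img; a short sketch of writing it to disk, mirroring the matplotlib.image.imsave call used in Example #3 (the filename is a placeholder and matplotlib is assumed to be imported):

# Sketch only: save the concatenated grid as a grayscale PNG, as done in Example #3.
matplotlib.image.imsave('samples_pf.png', full_img, cmap='gray')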
Example #3
obs_frames = []
state_frames = []
pf_frames = []
direct_frames = []
for t in range(0 + hist, n_test - 1):
    z_new = z_test[t].copy()
    z_new_test = z_test[t].copy()
    x_new = x_test[t].copy()
    x_hat_pf = pf.step(z_new)
    plt.subplot(1, 2, 1)
    plt.imshow(x_hat_pf)
    plt.subplot(1, 2, 2)
    plt.imshow(x_new)
    plt.show()
    # cv2.imshow("real", normalize_image(x_new))
    # cv2.imshow("pf", normalize_image(x_hat_pf))
    # if cv2.waitKey(1) & 0xFF == ord('q'):
    #     break
    obs_frames.append(add_border(normalize_image(z_new)))
    state_frames.append(add_border(normalize_image(x_new)))
    pf_frames.append(add_border(normalize_image(x_hat_pf)))
# cv2.destroyAllWindows()

# ---- Saves multiple samples as an image ---- #
idxs = np.arange(15, 55, 4, dtype=np.int16)
obs_img = np.concatenate(tuple(np.array(obs_frames)[idxs]), axis=1)
state_img = np.concatenate(tuple(np.array(state_frames)[idxs]), axis=1)
pf_img = np.concatenate(tuple(np.array(pf_frames)[idxs]), axis=1)
full_img = np.concatenate((obs_img, state_img, pf_img), axis=0).astype(np.uint8)
matplotlib.image.imsave('samples_pf.png', full_img, cmap='gray')

# ---- Saves a video ---- #
# `frames` is assumed to be filled with per-step comparison frames (as in Example #4).
outputdata = np.array(frames).astype(np.uint8)
skvideo.io.vwrite("samples.mp4", outputdata)
Example #4
frames = []
obs_frames = []
state_frames = []
df_frames = []
direct_frames = []
for t in range(0 + hist, n_test - 1):
    z_new = z_test[t].copy()
    z_new_test = z_test[t].copy()
    x_new = x_test[t].copy()
    x_hat_df = df.predict_mean(x_old, z_new)
    x_hat_df = x_hat_df[:, :, 0]
    x_hat_df_like = df.estimate(z_new_test)
    x_hat_df_like = x_hat_df_like[0, :, :, 0]
    x_old[:-1, :, :] = x_old[1:, :, :]
    x_old[-1, :, :] = x_hat_df
    obs_frames.append(add_border(normalize_image(z_new)))
    state_frames.append(add_border(normalize_image(x_new)))
    df_frames.append(add_border(normalize_image(x_hat_df)))
    direct_frames.append(add_border(normalize_image(x_hat_df_like)))
    frame1 = np.concatenate((normalize_image(x_new), normalize_image(z_new)),
                            axis=1)
    frame2 = np.concatenate(
        (normalize_image(x_hat_df), normalize_image(x_hat_df_like)), axis=1)
    frame = np.concatenate((frame1, frame2), axis=0)
    frames.append(frame)

# ---- Saves multiple samples as an image ---- #
idxs = np.arange(15, 55, 4, dtype=np.int16)
obs_img = np.concatenate(tuple(np.array(obs_frames)[idxs]), axis=1)
state_img = np.concatenate(tuple(np.array(state_frames)[idxs]), axis=1)
df_img = np.concatenate(tuple(np.array(df_frames)[idxs]), axis=1)
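Example #4 is cut off after df_img; a sketch of the remaining assembly and saving steps, following the same pattern as Examples #2 and #3 (the output filenames are placeholders, and matplotlib and skvideo.io are assumed to be imported as in Example #3):

# Sketch continuing the pattern of the earlier examples; filenames are placeholders.
direct_img = np.concatenate(tuple(np.array(direct_frames)[idxs]), axis=1)
full_img = np.concatenate((obs_img, state_img, df_img, direct_img),
                          axis=0).astype(np.uint8)
matplotlib.image.imsave('samples_df.png', full_img, cmap='gray')

# Write the per-step comparison frames collected in `frames` as a video.
outputdata = np.array(frames).astype(np.uint8)
skvideo.io.vwrite("samples_df.mp4", outputdata)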