Example #1
    nn = ModelPWCNet(mode='test', options=nn_opts)

    # Read data from mat file
    data_path = 'E:/FISR_Github/data/train/LR_LFR/LR_Surfing_SlamDunk_5seq.mat' # check: the path where your .mat file is located.
    data = read_mat_file(data_path, 'LR_data')
    sz = data.shape  # check: in our case [N, 5, 96, 96, 3]


    img_pairs = []
    scale = 2 # check, upscaling factor for spatial resolution
    ss = 1 # check, temporal stride (1 or 2)
    pred = np.zeros((sz[0], 8//ss, sz[2], sz[3], 2), dtype=np.float32) # check: 8//ss flow maps (forward and backward) per sample in our case
    for num in range(sz[0]):  
        for seq in range(sz[1]-(ss*2-1)):
            rgb_1 = YUV2RGB_matlab(data[num, ss*seq, :, :, :]) # check: since PWC-Net works on RGB images, we have to convert our YUV dataset.
            rgb_2 = YUV2RGB_matlab(data[num, ss*(seq+1), :, :, :])
            rgb_1 = resize(rgb_1, (sz[2] * scale, sz[3] * scale)) # check: for better prediction, we upscale x2 (run PWC-Net at a larger spatial resolution)
            rgb_2 = resize(rgb_2, (sz[2] * scale, sz[3] * scale))
            img_pairs.append((np.array(rgb_1, dtype=np.uint8), np.array(rgb_2, dtype=np.uint8))) # forward pair
            img_pairs.append((np.array(rgb_2, dtype=np.uint8), np.array(rgb_1, dtype=np.uint8))) # backward pair
        # Generate the predictions
        flow = np.array(nn.predict_from_img_pairs(img_pairs, batch_size=1, verbose=False))
        flow_rs = resize(flow, (flow.shape[0], sz[2], sz[3], 2), anti_aliasing=True) / scale # resize the flow back to the original resolution and rescale the flow vectors by the same factor
        pred[num, :, :, :, :] = flow_rs
        img_pairs = []
        print(num)

    print(pred.shape)
    write_flow(pred, 'E:/FISR_Github/data/train/flow/LR_Surfing_SlamDunk_5seq_ss{}.flo'.format(ss)) # check
    # display_img_pairs_w_flows(img_pairs, flow_rs.tolist())
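
# Supplementary sketch (not part of the original script): the snippet above relies on FISR's own
# helpers (read_mat_file, YUV2RGB_matlab, write_flow), which are not shown here. Below is a minimal,
# hypothetical stand-in for the YUV-to-RGB step, assuming the frames are stored as studio-swing
# YCbCr in the usual 0-255 range (MATLAB's ycbcr2rgb convention); rescale first if your data is in [0, 1].
import numpy as np
from skimage.color import ycbcr2rgb

def yuv2rgb_sketch(yuv_frame):
    # Convert one (H, W, 3) YCbCr frame to an 8-bit RGB frame, mirroring MATLAB's ycbcr2rgb.
    rgb = ycbcr2rgb(yuv_frame.astype(np.float64))  # float result, roughly in [0, 1]
    return np.clip(rgb * 255.0, 0.0, 255.0).astype(np.uint8)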
Example #2
img_pairs = []
for pair in range(1, 4):  # build the list of consecutive frame pairs to process (range chosen for illustration)
    image_path1 = f'./samples/mpisintel_test_clean_ambush_1_frame_00{pair:02d}.png'
    image_path2 = f'./samples/mpisintel_test_clean_ambush_1_frame_00{pair+1:02d}.png'
    image1, image2 = imread(image_path1), imread(image_path2)
    img_pairs.append((image1, image2))

# Configure the model for inference, starting with the default options
nn_opts = deepcopy(_DEFAULT_PWCNET_TEST_OPTIONS)
nn_opts['verbose'] = True
nn_opts['ckpt_path'] = ckpt_path
nn_opts['batch_size'] = 1
nn_opts['gpu_devices'] = gpu_devices
nn_opts['controller'] = controller

# We're running the PWC-Net-large model in quarter-resolution mode,
# that is, with a 6-level pyramid, upsampling the level-2 flow by 4 in each dimension as the final flow prediction
nn_opts['use_dense_cx'] = True
nn_opts['use_res_cx'] = True
nn_opts['pyr_lvls'] = 6
nn_opts['flow_pred_lvl'] = 2

# The sizes of the images in this dataset are not multiples of 64, while the model generates flows padded to
# multiples of 64. Hence, we need to crop the predicted flows to their original size
nn_opts['adapt_info'] = (1, 436, 1024, 2)

# Instantiate the model in inference mode and display the model configuration
nn = ModelPWCNet(mode='test', options=nn_opts)
nn.print_config()

# Generate the predictions and display them
pred_labels = nn.predict_from_img_pairs(img_pairs, batch_size=1, verbose=False)
display_img_pairs_w_flows(img_pairs, pred_labels)
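
# Supplementary note: why adapt_info is needed. With a 6-level pyramid the network downsamples by
# 2**6 = 64, so inputs are padded up to the next multiple of 64 and the raw flow comes back at the
# padded size. A quick sketch of that arithmetic for the 436 x 1024 MPI-Sintel frames:
import numpy as np

h, w = 436, 1024
padded_h = int(np.ceil(h / 64.0)) * 64  # 448
padded_w = int(np.ceil(w / 64.0)) * 64  # 1024
print(padded_h, padded_w)
# adapt_info = (1, 436, 1024, 2) records the original (N, H, W, 2) shape so the predicted flows
# can be cropped back from 448 x 1024 to 436 x 1024.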
Example #3
# We're running the PWC-Net-large model in quarter-resolution mode,
# that is, with a 6-level pyramid, upsampling the level-2 flow by 4 in each dimension as the final flow prediction
nn_opts['use_dense_cx'] = True
nn_opts['use_res_cx'] = True
nn_opts['pyr_lvls'] = 6
nn_opts['flow_pred_lvl'] = 2

# The sizes of the images in this dataset are not multiples of 64, while the model generates flows padded to
# multiples of 64. Hence, we need to crop the predicted flows to their original size
nn_opts['adapt_info'] = (1, 436, 1024, 2)

nn = ModelPWCNet(mode='test', options=nn_opts)
# nn.print_config()

# Run inference in chunks of 50 pairs to keep memory usage manageable,
# saving each chunk's flows before moving on to the next one.
chunk_size = 50
for start in range(0, 300, chunk_size):
    pred_labels = nn.predict_from_img_pairs(img_pairs[start:start + chunk_size], batch_size=1, verbose=False)
    zmf_save_flows(names[start:start + chunk_size], pred_labels, save_addr)
    print(start // chunk_size + 1)
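
# Supplementary sketch: zmf_save_flows is a user helper that is not shown here. Assuming it writes
# one Middlebury .flo file per image pair, a minimal stand-in could look like the following (the
# function names and the name + '.flo' naming scheme are illustrative assumptions, not the repo's API):
import os
import numpy as np

def write_flo(flow, path):
    # Write a single (H, W, 2) flow field in the Middlebury .flo format.
    flow = np.asarray(flow, dtype=np.float32)
    h, w = flow.shape[:2]
    with open(path, 'wb') as f:
        np.array([202021.25], dtype=np.float32).tofile(f)  # .flo magic number
        np.array([w, h], dtype=np.int32).tofile(f)         # width, then height
        flow.tofile(f)                                      # interleaved (u, v) values, row-major

def save_flows_sketch(names, flows, save_dir):
    for name, flow in zip(names, flows):
        write_flo(flow, os.path.join(save_dir, name + '.flo'))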