def main():
    """Open the Vimeo triplet training split and display its samples.

    Runs on CPU only; intended as a quick visual sanity check of the
    triplet dataset pipeline.
    """
    from qpwcnet.core.util import disable_gpu
    from qpwcnet.data.triplet_dataset_ops import show_triplet_dataset

    # Visualization only — keep TensorFlow off the GPU.
    disable_gpu()

    settings = VimeoTripletSettings(data_type='train')
    show_triplet_dataset(VimeoTriplet(settings))
def main():
    """Iterate an optical-flow dataset and visualize flow + warp results.

    For each (image-pair, flow) sample, shows the previous/next frames,
    the next frame warped back by the flow, a channel overlay, and a
    color-coded flow visualization. Press ESC to stop.
    """
    disable_gpu()
    # compute_stats()

    # Toggle between the raw single-file tfrecord reader and the
    # sharded set-based loader.
    if False:
        filename = '/media/ssd/datasets/sintel-processed/sintel.tfrecord'
        reader = get_reader(filename).map(preprocess)
    else:
        reader = get_dataset_from_set().map(preprocess_fc3d)

    # FIX: Dataset.shuffle() is not in-place — it returns a new dataset.
    # The original discarded the result, so no shuffling ever happened.
    reader = reader.shuffle(buffer_size=32)

    for entry in reader.as_numpy_iterator():
        ims, flo = entry
        flo_vis = flow_to_image(flo)

        # `ims` packs both frames along the channel axis: [prv | nxt].
        prv = ims[..., :3]
        nxt = ims[..., 3:]

        # Warp `nxt` back toward `prv` using the (negated, reversed) flow;
        # flow channel order is (x, y) while dense_image_warp expects (y, x).
        nxt_w = tfa.image.dense_image_warp(
            nxt[None, ...], -flo[None, ..., ::-1])[0].numpy()
        print(nxt_w.shape)

        cv2.imshow('prv', prv)
        cv2.imshow('nxt', nxt)
        cv2.imshow('nxt_w', nxt_w)
        cv2.imshow('nxt_w2', nxt_w - prv)

        # bgr overlay: b=prv, g=nxt, r=warped-next (grayscale means).
        overlay = np.stack([(prv).mean(axis=-1),
                            (nxt).mean(axis=-1),
                            (nxt_w).mean(axis=-1)], axis=-1)
        cv2.imshow('overlay', overlay)
        cv2.imshow('flo', normalize(flo[..., 0]))
        cv2.imshow('flo-vis', flo_vis.numpy())

        k = cv2.waitKey(0)
        if k == 27:  # ESC quits
            break
def main():
    """Preview random samples from the input pipeline in channels-first mode.

    Displays a randomly chosen (prv, nxt, flow) triple per batch;
    press ESC or 'q' to stop.
    """
    disable_gpu()

    data_format = 'channels_first'
    tf.keras.backend.set_image_data_format(data_format)

    batch_size = 8
    dataset = setup_input(batch_size, data_format)

    for imgs, flows in dataset:
        # Pick one random sample out of the batch to display.
        sample = np.random.randint(batch_size)
        prv = imgs[sample, :3]
        nxt = imgs[sample, 3:]
        flo = flows[sample]
        flo_rgb = flow_to_image(flo, data_format=data_format)

        # Images are zero-centered; shift back to [0, 1] for display.
        show('prv', 0.5 + prv, True, data_format)
        show('nxt', 0.5 + nxt, True, data_format)
        # The flow field is aligned with `prv`.
        show('flo', flo_rgb, True, data_format)

        if cv2.waitKey(0) in (27, ord('q')):
            break
def main():
    """Sanity-check Warp against WarpV2 on random inputs.

    Runs both warp layers on the same random image/flow pair, prints the
    mean difference, and visualizes both outputs plus their absolute diff.
    """
    disable_gpu()

    data_format = 'channels_last'
    warp_1 = Warp(data_format=data_format)
    warp_2 = WarpV2(data_format=data_format)

    # Only the tensor layout depends on the data format; the comparison
    # itself is identical either way.
    if data_format == 'channels_first':
        img_shape, flo_shape = (4, 3, 32, 64), (4, 2, 32, 64)
    else:
        img_shape, flo_shape = (4, 32, 64, 3), (4, 32, 64, 2)

    img = tf.random.uniform(shape=img_shape)
    flo = tf.random.normal(shape=flo_shape)

    c1 = warp_1((img, flo))
    c2 = warp_2((img, flo))
    print('diff', tf.reduce_mean(c1 - c2))

    cv2.imshow('c1', c1[0].numpy())
    cv2.imshow('c2', c2[0].numpy())
    cv2.imshow('diff', tf.abs(c1 - c2)[0].numpy())
    cv2.waitKey(0)
ims, flo = image_resize(ims, flo, (256, 512)) ims = ims - 0.5 # Convert to correct data format data_format = tf.keras.backend.image_data_format() if data_format == 'channels_first': ims = einops.rearrange(ims, '... h w c -> ... c h w') flo = einops.rearrange(flo, '... h w c -> ... c h w') return ims, flo if True: tf.keras.backend.set_image_data_format('channels_last') data_format = tf.keras.backend.image_data_format() disable_gpu() # TODO(ycho): Cleanup dataset loading pattern for opt-flow datasets. glob_pattern = '/media/ssd/datasets/sintel-processed/shards/sintel-*.tfrecord' filenames = tf.data.Dataset.list_files(glob_pattern).shuffle(32) # dataset = get_reader(filenames).shuffle(buffer_size=1024).repeat().batch(8) # dataset = get_reader(filenames).batch(8).repeat() dataset = get_reader(filenames).shuffle( buffer_size=32).map(preprocess).batch(1) for ims, flo in dataset: # Unstack `ims`. if data_format == 'channels_first': prv, nxt = einops.rearrange( ims, 'n (k c) h w -> k n c h w', k=2) else: