import numpy as np

import nvidia.dali as dali
import nvidia.dali.fn as fn
import nvidia.dali.ops as ops
import nvidia.dali.types as types
from nvidia.dali.pipeline import Pipeline

import test_utils

# caffe_dir, caffe_db_folder and the module-level batch_size are assumed to be
# defined elsewhere in this test module (they point at the Caffe LMDB test data).


def test_separated_exec_setup():
    batch_size = 128
    # A dict prefetch_queue_depth enables the separated executor with
    # independent CPU- and GPU-stage queue depths.
    pipe = Pipeline(batch_size=batch_size, num_threads=3, device_id=None,
                    prefetch_queue_depth={"cpu_size": 5, "gpu_size": 3})
    inputs, labels = fn.caffe_reader(path=caffe_dir, shard_id=0, num_shards=1)
    images = fn.image_decoder(inputs, output_type=types.RGB)
    images = fn.resize(images, resize_x=224, resize_y=224)
    images_cpu = fn.dump_image(images, suffix="cpu")
    pipe.set_outputs(images, images_cpu)
    pipe.build()
    out = pipe.run()
    assert out[0].is_dense_tensor()
    assert out[1].is_dense_tensor()
    assert out[0].as_tensor().shape() == out[1].as_tensor().shape()
    a_raw = out[0]
    a_cpu = out[1]
    for i in range(batch_size):
        t_raw = a_raw.at(i)
        t_cpu = a_cpu.at(i)
        # dump_image forwards its input unchanged, so both outputs must match exactly.
        assert np.sum(np.abs(t_cpu - t_raw)) == 0


def test_caffe_reader_cpu():
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=None)
    out, _ = fn.caffe_reader(path=caffe_dir, shard_id=0, num_shards=1)
    pipe.set_outputs(out)
    pipe.build()
    for _ in range(3):
        pipe.run()


def test_compose_change_device():
    batch_size = 3
    pipe = Pipeline(batch_size, 1, 0)
    size = fn.uniform(shape=2, range=(300, 500))
    # Compose a CPU decoder with a GPU resize; Compose handles the cpu->gpu
    # transition automatically.
    c = ops.Compose([
        ops.ImageDecoder(device="cpu"),
        ops.Resize(size=size, device="gpu")
    ])
    files, labels = fn.caffe_reader(path=caffe_db_folder, seed=1)
    # The composed operator must match the equivalent explicit fn chain.
    pipe.set_outputs(c(files), fn.resize(fn.image_decoder(files).gpu(), size=size))
    pipe.build()
    out = pipe.run()
    assert isinstance(out[0], dali.backend.TensorListGPU)
    test_utils.check_batch(out[0], out[1], batch_size=batch_size)
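

# --- Illustrative sketch (not part of the original tests) --------------------
# A minimal, assumed rewrite of the Compose pipeline above using the
# pipeline_def decorator, shown only to illustrate the equivalent graph
# definition; the function name compose_pipeline_sketch is hypothetical and
# caffe_db_folder is reused from elsewhere in this module.
from nvidia.dali import pipeline_def


@pipeline_def(batch_size=3, num_threads=1, device_id=0)
def compose_pipeline_sketch():
    size = fn.uniform(shape=2, range=(300, 500))
    decode_and_resize = ops.Compose([
        ops.ImageDecoder(device="cpu"),
        ops.Resize(size=size, device="gpu"),
    ])
    files, _ = fn.caffe_reader(path=caffe_db_folder, seed=1)
    return decode_and_resize(files)


# Usage sketch: pipe = compose_pipeline_sketch(); pipe.build(); images, = pipe.run()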