import numpy as np

from nvidia.dali import fn, types
from nvidia.dali.pipeline import Pipeline


def _test_extremely_large_data(device):
    in_size = 30000
    out_size = 10
    channels = 3

    def get_data():
        # A single huge image filled with 42; the bottom-right pixel stores the
        # channel index so it can be verified after the flip.
        out = np.full([in_size, in_size, channels], 42, dtype=np.uint8)
        for c in range(channels):
            out[in_size - 1, in_size - 1, c] = c
        return [out]

    pipe = Pipeline(1, 3, 0, prefetch_queue_depth=1)
    inp = fn.external_source(source=get_data, device=device)
    # Rotate the image by 180 degrees; only a small out_size x out_size corner is
    # produced, so the marked pixel lands at (0, 0).
    rotated = fn.warp_affine(inp, matrix=[-1, 0, in_size, 0, -1, in_size],
                             fill_value=255.0, size=[out_size, out_size],
                             interp_type=types.INTERP_NN)
    pipe.set_outputs(rotated)
    pipe.build()
    out = None
    try:
        out, = pipe.run()
    except RuntimeError as e:
        if "bad_alloc" in str(e):
            print("Skipping test due to out-of-memory error:", e)
            return
        raise
    except MemoryError as e:
        print("Skipping test due to out-of-memory error:", e)
        return

    if device == "cpu":
        out = out.at(0)
    else:
        out = out.as_cpu().at(0)
    assert out.shape == (out_size, out_size, channels)
    for c in range(channels):
        assert out[0, 0, c] == c
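
# A minimal, hypothetical driver for the helper above (not part of the original
# module): it simply runs the check on both backends, assuming a GPU is available
# for the "gpu" case.
def test_extremely_large_data():
    for device in ["cpu", "gpu"]:
        _test_extremely_large_data(device)
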
def dali_frame_splicing_graph(x, nfeatures, x_len, stacking=1, subsampling=1):
    if stacking > 1:
        # Frame stacking: concatenate time-shifted copies of the features along
        # the feature axis, padding past-the-end frames with zeros.
        seq = [x]
        for n in range(1, stacking):
            f = fn.slice(x, n, x_len, axes=(1,), out_of_bounds_policy='pad', fill_values=0)
            seq.append(f)
        x = fn.cat(*seq, axis=0)
        nfeatures = nfeatures * stacking

    if subsampling > 1:
        # Time subsampling: resample the time axis with a nearest-neighbor warp,
        # keeping roughly every subsampling-th frame.
        out_len = (x_len + subsampling - 1) // subsampling
        m = fn.transforms.scale(scale=[subsampling, 1], center=[0.5, 0])
        x = fn.reshape(x, rel_shape=[1, 1, -1], layout="HWC")  # Layout required by WarpAffine
        size = fn.cat(nfeatures, out_len)
        x = fn.warp_affine(x, matrix=m, size=size, interp_type=types.INTERP_NN)
        x = fn.reshape(x, rel_shape=[1, 1], layout="ft")
    return x
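
# A minimal usage sketch (not from the original file) of how the graph function above
# could be wired into a pipeline. The generators spec_source/len_source, the batch
# size, and the CPU-only placement are assumptions made for illustration.
from nvidia.dali import pipeline_def


@pipeline_def(batch_size=1, num_threads=3, device_id=None)
def _frame_splicing_example_pipe(spec_source, len_source, nfeatures, stacking, subsampling):
    # Spectrograms in "ft" layout (features x time) and their corresponding frame counts.
    x = fn.external_source(source=spec_source, layout="ft", dtype=types.FLOAT)
    x_len = fn.external_source(source=len_source, dtype=types.INT32)
    return dali_frame_splicing_graph(x, nfeatures, x_len,
                                     stacking=stacking, subsampling=subsampling)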