def gen_kernels(self, runtime, N, C, K, D, H, W, T, R, S, M, P, Q,
                pad_d, pad_h, pad_w, str_d, str_h, str_w, dil_d, dil_h, dil_w):
    self.I = TensorDescriptionWrapper(self.I, len(self.I.shape))
    self.F = TensorDescriptionWrapper(self.F, len(self.F.shape))
    self.O = TensorDescriptionWrapper(self.O, len(self.O.shape))

    self.flex_entry_I = self.I.flex_entry()
    self.flex_entry_F = self.F.flex_entry()
    self.flex_entry_O = self.O.flex_entry()

    vec_size = 4 if self.dtype.itemsize == 4 else 8

    assert N % 32 == 0, "N dim must be multiple of 32"
    assert K % vec_size == 0, "K dim must be multiple of %d" % vec_size

    if self.dtype.type == "flex":
        clss = "fconv"
    else:
        raise TypeError("Type not supported.")

    self.C = C
    self.K = K
    self.M = M
    self.P = P
    self.Q = Q
    self.NCK = (N, C, K)
    self.TRS = (T, R, S)
    self.DHW = (D, H, W)
    self.MPQ = (M, P, Q)
    self.padding = (pad_d, pad_h, pad_w)
    self.strides = (str_d, str_h, str_w)

    self.all_params = (N, C, K, D, H, W, T, R, S,
                       pad_d, pad_h, pad_w, str_d, str_h, str_w)

    self.dimI = (C, D, H, W, N)
    self.dimF = (C, T, R, S, K)
    self.dimFb = (K, T, R, S, C)
    self.dimO = (K, M, P, Q, N)
    self.dimI2 = (C * D * H * W, N)
    self.dimF2 = (C * T * R * S, K)
    self.dimF2t = (K, C * T * R * S)
    self.dimO2 = (K * M * P * Q, N)
    self.dimS = (K, 1)
    self.sizeI = reduce(mul, self.dimI, 1)
    self.sizeF = reduce(mul, self.dimF, 1)
    self.sizeO = reduce(mul, self.dimO, 1)
    self.nOut = reduce(mul, self.MPQ, 1) * K

    # precompute some multiplications for fast constant memory access
    WN = W * N
    HWN = H * WN
    DHWN = D * HWN
    RS = R * S
    RST = T * RS
    CRST = C * RST
    KRST = K * RST
    PQ = P * Q
    PQM = M * PQ
    QN = Q * N
    PQN = P * QN
    MPQN = M * PQN

    assert CRST < 2**16, "Integer division is faster with 16bit numerators"

    # precompute the magic numbers and shift amounts for integer division
    magic_PQ = _magic64(PQ)
    magic_Q = _magic64(Q)
    magic_RS = _magic32(RST + 32, RS)
    magic_S = _magic32(RS + 32, S)

    # flop count for benchmarking
    self.flops = PQM * K * N * CRST * 2.0

    tile_N = 128 if N > 64 else 64
    grid_N = _grid_dim(tile_N, N)
    tiles_CK = (128, 64, 32) if tile_N == 128 else (128, 64)

    # FPROP #
    self.fprop_kernels = kernel_specs.xprop_conv_kernels(
        clss, "fprop", "K", tile_N, grid_N, K, tiles_CK, PQM, RST,
        _flatten([N, K, D, H, W, WN, HWN, DHWN,
                  C, KRST, RST, RS, magic_RS, S, magic_S,
                  pad_d, pad_h, pad_w, str_d, str_h, str_w,
                  Q, PQ, QN, PQN, MPQN, magic_Q, magic_PQ]))

    # shared lookup table size
    self.fprop_lut_size = RST * 4 * 2

    # Set to 5 for the current T1000 HW config
    self.trunc_rows = 32
    flags = self.trunc_rows << 8

    self.kernels = []
    for kernel in self.fprop_kernels:
        # TODO: Populate alpha and beta parameters (in a separate loop!).
        # alpha (used to be params[6]) will be multiplied with
        self.kernels.append([
            kernel_specs.get_kernel(kernel[0]), kernel[1], kernel[2], None, 0,
            self.O, self.I, self.F, 1.0, 0.0, flags, kernel[3]] + kernel[4])

    for kernel in self.kernels:
        kernel.extend((FlexPtrDescription(self.flex_entry_O), 1.0))
        kernel[10] &= 0xfffffffe  # enable output (clear bit 0 of the flags word)

    # record output flex id for autoflex
    self.output_flex_ids = [self.flex_entry_O.flex_id]
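
# A minimal, self-contained sketch of the "magic number" division trick that the
# _magic32/_magic64 helpers above precompute for the kernels: for a fixed divisor
# d and numerators bounded by nmax, a (magic, shift) pair satisfies
#     n // d == (n * magic) >> shift    for all 0 <= n <= nmax,
# so the GPU code can replace integer division with a multiply and a shift.
# This function is illustrative only and is not the library's implementation;
# the default divisor (a hypothetical PQ of 14 * 14) is just an example.
def _magic_division_sketch(d=14 * 14, nmax=2 ** 21):
    nc = ((nmax + 1) // d) * d - 1
    for p in range(0, 2 * nmax.bit_length() + 1):
        if 2 ** p > nc * (d - 1 - (2 ** p - 1) % d):
            magic = (2 ** p + d - 1 - (2 ** p - 1) % d) // d
            break
    else:
        raise ValueError("no magic number found for divisor %d" % d)
    # spot-check: multiply-and-shift reproduces true integer division
    for n in (0, 1, d - 1, d, 12345, nmax):
        assert (n * magic) >> p == n // d
    return magic, p
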
def gen_kernels(self, runtime, N, C, K, D, H, W, T, R, S, M, P, Q,
                pad_d, pad_h, pad_w, str_d, str_h, str_w, dil_d, dil_h, dil_w):
    self.E = TensorDescriptionWrapper(self.E, len(self.E.shape))
    self.F = TensorDescriptionWrapper(self.F, len(self.F.shape))
    self.O = TensorDescriptionWrapper(self.O, len(self.O.shape))

    self.flex_entry_E = self.E.flex_entry()
    self.flex_entry_F = self.F.flex_entry()
    self.flex_entry_O = self.O.flex_entry()

    F_size = int(np.prod(self.F.shape) * 2)
    O_size = int(np.prod(self.O.shape) * 2)

    vec_size = 4 if self.dtype.itemsize == 4 else 8

    assert N % 32 == 0, "N dim must be multiple of 32"
    assert K % vec_size == 0, "K dim must be multiple of %d" % vec_size

    if self.dtype.type == "flex":
        clss = "fconv"
    else:
        raise TypeError("Type not supported.")

    self.C = C
    self.K = K
    self.M = M
    self.P = P
    self.Q = Q
    self.NCK = (N, C, K)
    self.TRS = (T, R, S)
    self.DHW = (D, H, W)
    self.MPQ = (M, P, Q)
    self.padding = (pad_d, pad_h, pad_w)
    self.strides = (str_d, str_h, str_w)

    self.all_params = (N, C, K, D, H, W, T, R, S,
                       pad_d, pad_h, pad_w, str_d, str_h, str_w)

    self.dimI = (C, D, H, W, N)
    self.dimF = (C, T, R, S, K)
    self.dimFb = (K, T, R, S, C)
    self.dimO = (K, M, P, Q, N)
    self.dimI2 = (C * D * H * W, N)
    self.dimF2 = (C * T * R * S, K)
    self.dimF2t = (K, C * T * R * S)
    self.dimO2 = (K * M * P * Q, N)
    self.dimS = (K, 1)
    self.sizeI = reduce(mul, self.dimI, 1)
    self.sizeF = reduce(mul, self.dimF, 1)
    self.sizeO = reduce(mul, self.dimO, 1)
    self.nOut = reduce(mul, self.MPQ, 1) * K

    # precompute some multiplications for fast constant memory access
    HW = H * W
    DHW = D * HW
    WN = W * N
    HWN = H * WN
    DHWN = D * HWN
    RS = R * S
    RST = T * RS
    CRST = C * RST
    PQ = P * Q
    PQM = M * PQ
    QN = Q * N
    PQN = P * QN
    MPQN = M * PQN

    assert CRST < 2**16, "Integer division is faster with 16bit numerators"

    # precompute the magic numbers and shift amounts for integer division
    magic_HW = _magic64(HW)
    magic_W = _magic64(W)
    magic_PQ = _magic64(PQ)
    magic_Q = _magic64(Q)
    magic_RST = _magic32(CRST, RST)
    magic_RS = _magic32(RST + 32, RS)
    magic_S = _magic32(RS + 32, S)
    magic_str_w = _magic32(W + S, str_w)
    magic_str_h = _magic32(H + R, str_h)
    magic_str_d = _magic32(D + T, str_d)

    # flop count for benchmarking
    self.flops = PQM * K * N * CRST * 2.0

    tile_N = 128 if N > 64 else 64
    grid_N = _grid_dim(tile_N, N)
    tiles_CK = (128, 64, 32) if tile_N == 128 else (128, 64)

    # BPROP #
    if C < 16 or C % vec_size != 0:
        # special kernel for deconv into first layer
        kernel_name = "%s_bprop_C1_N64" % clss

        grid = (PQM, _grid_dim(32, CRST), _grid_dim(64, N))
        block = (32, 1, 1)

        self.bprop_kernels = [[kernel_name, grid, block, 0, _flatten([
            N, K, D, H, W, WN, HWN, DHWN,
            C, CRST, RST, magic_RST, RS, magic_RS, S, magic_S,
            pad_d, pad_h, pad_w, str_d, str_h, str_w,
            Q, PQ, QN, PQN, MPQN, magic_Q, magic_PQ,
            CRST * 8 * self.dtype.itemsize, MPQN * 8 * self.dtype.itemsize])]]

        # generate the kernel args for transpose CRST,K => K,CRST
        self.shuffle_args = [CRST, K]
        gridX = (K >> 5) + (K & 31 != 0)
        gridY = (CRST >> 5) + (CRST & 31 != 0)
        self.shuffle_grid = (gridX, gridY, 1)
        self.shuffle_block = (32, 8, 1)
        self.bprop_zero = self.sizeI * self.dtype.itemsize
        self.bprop_lut_size = 0
    else:
        self.bprop_kernels = kernel_specs.xprop_conv_kernels(
            clss, "bprop", "C", tile_N, grid_N, C, tiles_CK, DHW, RST,
            _flatten([N, C, M, P, Q, QN, PQN, MPQN,
                      K, CRST, RST, RS, magic_RS, S, magic_S,
                      pad_d, pad_h, pad_w, str_d, str_h, str_w,
                      W, HW, WN, HWN, DHWN, magic_W, magic_HW,
                      R, T, magic_str_w, magic_str_h, magic_str_d]))

        # generate the kernel args for dim shuffling CRSTK => KRSTC
        self.shuffle_args = _flatten([
            RST * K, RS * K, S * K, K,
            RST * C, RS * C, S * C, C,
            RS, magic_RS, S, magic_S])
        gridX = (K >> 5) + (K & 31 != 0)
        gridY = (C >> 5) + (C & 31 != 0)
        self.shuffle_grid = (gridX, gridY, RST)
        self.shuffle_block = (32, 8, 1)
        self.bprop_zero = 0
        self.bprop_lut_size = RST * 4 * 2

    # Set to 5 for the current T1000 HW config
    self.trunc_rows = 32
    flags = self.trunc_rows << 8

    # Must dim shuffle filter data for bprop kernel
    F_data = ScratchBufferWrapper(F_size, 0, runtime)
    if self.bprop_zero:
        Out = ScratchBufferWrapper(O_size, F_size, runtime)
        shuffle_kernel = _get_transpose_kernel(self.dtype)
    else:
        Out = self.O
        # can point to transpose or dimshuffle kernel
        shuffle_kernel = _get_shuffle_kernel(self.dtype)

    shuffle_args = [self.shuffle_grid, self.shuffle_block, None,
                    F_data, self.F] + self.shuffle_args
    shuffle_kernel = [shuffle_kernel] + shuffle_args

    # Have to zero output buffer and use type conversion for kernel using atomics
    if self.bprop_zero:
        shape = [int(np.prod(self.O.shape[:-1])), self.O.shape[-1]]
        convert_kernel = _prepare_convert_kernel(Out, "f2", self.O, shape,
                                                 FlexPtrDescription(self.flex_entry_O))
        self.convert_out = True
    else:
        self.convert_out = False

    self.kernels = []
    for kernel in self.bprop_kernels:
        # TODO: Populate alpha and beta parameters (in a separate loop!).
        # alpha (used to be params[6]) will be multiplied with
        self.kernels.append([
            kernel_specs.get_kernel(kernel[0]), kernel[1], kernel[2], None, 0,
            Out, self.E, F_data, 1.0, 0.0, flags, kernel[3]] + kernel[4])

    for kernel in self.kernels:
        kernel.extend((FlexPtrDescription(self.flex_entry_O), 1.0))
        kernel[10] &= 0xfffffffe  # enable output (clear bit 0 of the flags word)

    self.kernels = [shuffle_kernel] + self.kernels
    if self.convert_out:
        self.kernels.append(convert_kernel)

    # record output flex id for autoflex
    self.output_flex_ids = [self.flex_entry_O.flex_id]
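
# Hypothetical host-side reference for the filter dim shuffle set up above: the
# shuffle kernel rewrites filters laid out as (C, T, R, S, K) (self.dimF) into
# the (K, T, R, S, C) layout (self.dimFb) consumed by the bprop kernel. This
# NumPy sketch only documents the index mapping (CRSTK => KRSTC); it is not the
# GPU kernel and assumes numpy is available as np (as it is used above).
def _shuffle_filter_reference(filter_crstk, C, T, R, S, K):
    F = np.asarray(filter_crstk).reshape(C, T, R, S, K)
    # move K to the front and C to the back, keeping T, R, S in place
    return np.ascontiguousarray(F.transpose(4, 1, 2, 3, 0))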