def __init__(self, lhs, rhs):
    shape = tile.Shape(plaidml.DType.BOOLEAN,
                       tile.broadcast_dims(lhs.shape.dims, rhs.shape.dims))
    self.lhs = lhs
    self.rhs = rhs
    super(Equal, self).__init__('function (L, R) -> (O) { O = (L == R); }',
                                [('L', lhs), ('R', rhs)], [('O', shape)])
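# A minimal usage sketch for the Equal operation above. It assumes the
# PlaidML Keras backend has been installed via plaidml.keras.install_backend()
# and that Equal is importable from the surrounding module; the input values
# are hypothetical.
import numpy as np
import plaidml.keras
plaidml.keras.install_backend()
import keras.backend as K

lhs = K.variable(np.array([1., 2., 3.]))
rhs = K.variable(np.array([1., 0., 3.]))
eq = Equal(lhs, rhs).sole_output()
print(eq.eval())  # elementwise comparison; expected [True, False, True]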
def test_tuple_deriv(self):
    """Test tuples work via derivatives"""
    A = tile.Value.from_ndims(2)
    B = tile.Value.from_ndims(2)
    out_dims = (A.shape.dims[0], B.shape.dims[1])
    out_shape = tile.Shape(tile.common_dtype(A.shape.dtype, B.shape.dtype), out_dims)
    out = tile.Operation(
        """
        function (A[I, K], B[K, J]) -> (O) {
            T = tuple(A, B);
            C = element(T, 0);
            D = element(T, 1);
            O[i, j : I, J] = +(C[i, k] * D[k, j]);
        }
        """,
        [('A', A), ('B', B)],
        [('O', out_shape)]).outputs['O']
    tot = op.summation(out, [0, 1])
    dA = op.gradients(tot, [A])[0]
    func = tile.compose(self._ctx,
                        self._dev,
                        inputs=[('A', A), ('B', B)],
                        outputs=[('DA', dA)])
    invoker = plaidml.Invoker(self._ctx, func)
    invoker.set_input('A', self.make_inited_tensor((3, 3)))
    invoker.set_input('B', self.make_inited_tensor((3, 3)))
    output = self.make_output_tensor(invoker.get_output_shape('DA'))
    invoker.set_output('DA', output)
    invoker.invoke()
def __init__(self, lhs, rhs):
    lmax = ismax(lhs.source.op.value, axes=(lhs.source.op.axis,))
    rmax = ismax(rhs.source.op.value, axes=(rhs.source.op.axis,))
    and_shape = tile.Shape(plaidml.DType.INT32,
                           tile.broadcast_dims(lmax.shape.dims, rmax.shape.dims))
    and_op = tile.Operation('function (L, R) -> (O) { O = L ? (R ? 1 : 0) : 0; }',
                            [('L', lmax), ('R', rmax)], [('O', and_shape)])
    sum_val = summation(and_op.output_tuple[0],
                        axes=(lhs.source.op.axis,),
                        keepdims=True)
    eq_shape = tile.Shape(plaidml.DType.BOOLEAN, sum_val.shape.dims)
    super(Equal_ArgMax, self).__init__('function (I) -> (O) { O = 0 < I; }',
                                       [('I', sum_val)], [('O', eq_shape)])
def __init__(self,
             data,
             kernel,
             auto_pad=None,
             dilations=None,
             group=1,
             kernel_shape=None,
             pads=None,
             strides=None):
    rank = data.shape.ndims - 2
    padding = _convert_auto_pad(auto_pad, pads)
    pads = _extend_pads(pads, rank)
    if not strides:
        strides = tuple(1 for _ in range(rank))
    if not dilations:
        dilations = tuple(1 for _ in range(rank))
    if not kernel_shape:
        kernel_shape = kernel.shape.dims
    else:
        kernel_shape = tuple([kernel.shape.dims[0], kernel.shape.dims[1]] +
                             list(kernel_shape))
    for entry in dilations:
        if not isinstance(entry, six.integer_types) or entry <= 0:
            raise ValueError('Invalid dilation_rate: {}'.format(dilations))
    if kernel.shape.ndims != rank + 2:
        raise ValueError('Convolution kernel shape inconsistent with input shape: ' +
                         '{} (rank {}) v {} (rank {})'.format(
                             kernel.shape, kernel.shape.ndims - 2, data.shape,
                             data.shape.ndims - 2))
    if len(strides) != rank:
        raise ValueError('Convolution strides length inconsistent with input shape: ' +
                         '{} (rank {}) v {} (rank {})'.format(
                             strides, len(strides), data.shape, data.shape.ndims - 2))
    if len(dilations) != rank:
        raise ValueError('Convolution dilations length inconsistent with input shape: ' +
                         '{} (rank {}) v {} (rank {})'.format(
                             dilations, len(dilations), data.shape, data.shape.ndims - 2))
    conv_strs = _format_conv_strings(rank, data.shape.dims, kernel_shape, strides,
                                     padding, pads, dilations, group)
    outshape = tile.Shape(data.shape.dtype, conv_strs['outshape_tuple'])
    code = """
        function (I[{input_dims_str}], K[{ker_dims_str}]) -> (O) {{
            GO[{out_idx_str} : {out_dims_str}] = +(I[{input_idx_str}] * K[{ker_idx_str}]);
            O = {group_reshape};
        }}""".format(**conv_strs)
    super(Convolution, self).__init__(code, [('I', data), ('K', kernel)],
                                      [('O', outshape)])
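# A hypothetical usage sketch for the ONNX-style Convolution above: an NCHW
# input of shape (1, 3, 8, 8) convolved with a (4, 3, 3, 3) kernel, padded by
# one on each spatial edge so the spatial dims are preserved. This relies on
# the module's private helpers (_convert_auto_pad, _format_conv_strings) being
# in scope, and on the np/K setup from the Equal sketch above.
data = K.variable(np.random.rand(1, 3, 8, 8))
kernel = K.variable(np.random.rand(4, 3, 3, 3))
conv = Convolution(data, kernel, pads=(1, 1, 1, 1)).sole_output()
print(conv.shape)  # expected shape: (1, 4, 8, 8)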
def testSkipping(self):
    I = K.variable(np.array([[1., 2.], [3., 4.], [5., -4.], [-5., 6.], [-7., 9.]]))
    # Only even output indices are written (O[2 * i]); output elements that no
    # iteration writes default to 0.
    code = """function (I[N, M]) -> (O) {
                  O[2 * i: N] = +(I[2 * i, j]);
              }"""
    value = tile.Operation(code, [('I', I)],
                           [('O', tile.Shape(I.shape.dtype, (5,)))],
                           name='Skip') \
                .sole_output()
    npt.assert_allclose(value.eval(), np.array([3., 0., 1., 0., 2.]))
def __init__(self, value, axes, keepdims=False):
    dims, _, subs = tile.compute_aggregation_axes(value.shape.dims, axes, keepdims)
    code = """function (I[{src_ranges}]) -> (O) {{
                  O[{dest_indices}{dest_sep}{dest_ranges}] = +(I[{src_indices}]);
              }}""".format(**subs)
    super(Summation, self).__init__(code, [('I', value)],
                                    [('O', tile.Shape(value.shape.dtype, dims))])
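# Sketch: reducing a 2x3 tensor over axis 0 with the Summation operation
# above; uses the np/K setup from the Equal sketch above.
I = K.variable(np.array([[1., 2., 3.], [4., 5., 6.]]))
total = Summation(I, axes=(0,)).sole_output()
print(total.eval())  # column sums; expected [5., 7., 9.]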
def testSameMaxPool(self):
    I = K.variable(np.array([1., 2., 3., 4., 5.]))
    code = """function (I[N]) -> (O) {
                  O[i: (N + 1) / 2] = >(I[2 * i + j]), j < 2;
              }"""
    value = tile.Operation(code, [('I', I)],
                           [('O', tile.Shape(I.shape.dtype, ((I.shape.dims[0] + 1) // 2,)))],
                           name='ValidMaxpool') \
                .sole_output()
    npt.assert_allclose(value.eval(), np.array([2., 4., 5.]))
def testCumSum(self):
    I = K.variable(np.array([1., 2., 3., 4., 5., 6.]))
    code = """function (I[N]) -> (O) {
                  O[i: N] = +(I[i - j]), j < N;
              }"""
    value = tile.Operation(code, [('I', I)],
                           [('O', tile.Shape(I.shape.dtype, (6,)))],
                           name='CumulativeSum') \
                .sole_output()
    # Equivalent formulation with the constraint written on the summation index.
    code2 = """function (I[N]) -> (O) {
                   O[i: N] = +(I[k]), i - k < N;
               }"""
    value2 = tile.Operation(code2, [('I', I)],
                            [('O', tile.Shape(I.shape.dtype, (6,)))],
                            name='CumulativeSum2') \
                 .sole_output()
    reference = K.cumsum(I)
    npt.assert_allclose(value.eval(), reference.eval())
    npt.assert_allclose(value2.eval(), reference.eval())
def __init__(self, a, b, c, alpha=None, beta=None, broadcast=True, transA=False, transB=False):
    if not broadcast and c.shape.ndims != 2:
        raise NotImplementedError(
            'Gemm without multiplier broadcast requires a two-dimensional scalar multiplier; '
            'multiplier rank={}'.format(c.shape.ndims))

    def gemm_reshape(value):
        if value.shape.ndims < 2:
            raise tile.LogicError(
                'Invalid Gemm input; two dimensions required, got: {}'.format(value.shape))
        if value.shape.ndims == 2:
            return value
        newdims = (value.shape.dims[0],
                   functools.reduce(lambda x, y: x * y, value.shape.dims[1:]))
        return op.reshape(value, newdims)

    a = gemm_reshape(a)
    b = gemm_reshape(b)

    code = """
        function (A[{a_dims}], B[{b_dims}], C) -> (O) {{
            OM[row, col : ROW, COL] = +(A[{a_idxs}] * B[{b_idxs}]);
            OA = {alpha_expr};
            CB = {beta_expr};
            O = OA + CB;
        }}""".format(
        a_dims='MID, ROW' if transA else 'ROW, MID',
        b_dims='COL, MID' if transB else 'MID, COL',
        a_idxs='mid, row' if transA else 'row, mid',
        b_idxs='col, mid' if transB else 'mid, col',
        alpha_expr='OM * {}'.format(alpha) if alpha else 'OM',
        beta_expr='C * {}'.format(beta) if beta else 'C',
    )

    outshape = tile.Shape(
        tile.common_dtype(a.shape.dtype, b.shape.dtype, c.shape.dtype),
        tile.broadcast_dims((
            a.shape.dims[1] if transA else a.shape.dims[0],
            b.shape.dims[0] if transB else b.shape.dims[1],
        ), c.shape.dims))

    super(Gemm, self).__init__(code, [('A', a), ('B', b), ('C', c)], [('O', outshape)])
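# Sketch: Gemm computes alpha * A' @ B' + beta * C, where A' and B' are
# optionally transposed via transA/transB. The values here are hypothetical
# and chosen to be easy to check by hand; np/K as in the Equal sketch above.
A = K.variable(np.array([[1., 2.], [3., 4.]]))
B = K.variable(np.array([[5., 6.], [7., 8.]]))
C = K.variable(np.array([[1., 1.], [1., 1.]]))
out = Gemm(A, B, C, alpha=2., beta=3.).sole_output()
print(out.eval())  # 2 * (A @ B) + 3 * C = [[41., 47.], [89., 103.]]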
def __init__(self, value, axes):
    dims, _, subs = tile.compute_aggregation_axes(value.shape.dims, axes, True)
    code = """function (I[{src_ranges}]) -> (O) {{
                  MAX[{dest_indices}{dest_sep}{dest_ranges}] = >(I[{src_indices}]);
                  O = (MAX == I);
              }}""".format(**subs)
    super(IsMax, self).__init__(code, [('I', value)],
                                [('O', tile.Shape(plaidml.DType.BOOLEAN, dims))])
def testBrokenMaxPool(self):
    I = K.variable(np.array([1., 2., 3., 4., 5.]))
    # With no constraint on j, each output element aggregates over the entire
    # input, so every element of O is the global max.
    code = """function (I[N]) -> (O) {
                  O[i: N / 2] = >(I[2 * i + j]);
              }"""
    value = tile.Operation(code, [('I', I)],
                           [('O', tile.Shape(I.shape.dtype, (I.shape.dims[0] // 2,)))],
                           name='BrokenMaxpool') \
                .sole_output()
    reference = K.max(I)
    npt.assert_allclose(value.eval(), [5.] * 2)
def testMaxOverAxis(self):
    I = K.variable(np.array([[1., 2., 3.], [4., 5., 6.]]))
    code = """function (I[M, N]) -> (O) {
                  O[n: N] = >(I[m, n]);
              }"""
    value = tile.Operation(code, [('I', I)],
                           [('O', tile.Shape(I.shape.dtype, (I.shape.dims[1],)))],
                           name='MaxOverAxis') \
                .sole_output()
    reference = K.max(I, axis=0)
    npt.assert_allclose(value.eval(), reference.eval())
def testMeanOverAxis(self):
    I = K.variable(np.array([[1., 2., 3.], [4., 5., 6.]]))
    code = """function (I[X, Y]) -> (O) {
                  Sum[y: Y] = +(I[x, y]);
                  O = Sum / X;
              }"""
    value = tile.Operation(code, [('I', I)],
                           [('O', tile.Shape(I.shape.dtype, (I.shape.dims[1],)))],
                           name='MeanOverAxis') \
                .sole_output()
    reference = K.mean(I, axis=0)
    npt.assert_allclose(value.eval(), reference.eval())
def testGlobalMean(self):
    I = K.variable(np.array([[1., 2., 3.], [4., 5., 6.]]))
    code = """function (I[X, Y]) -> (O) {
                  Sum[] = +(I[x, y]);
                  O = Sum / (X * Y);
              }"""
    value = tile.Operation(code, [('I', I)],
                           [('O', tile.Shape(I.shape.dtype, tuple()))],
                           name='GlobalMean') \
                .sole_output()
    reference = K.mean(I, axis=[0, 1])
    npt.assert_allclose(value.eval(), reference.eval())
def testMatMul(self):
    A = K.variable(np.array([[1., 2., 3.], [4., 5., 6.]]))
    B = K.variable(np.array([[1., -2.], [-3., 4.], [5., -6.]]))
    code = """function (A[M, L], B[L, N]) -> (C) {
                  C[i, j: M, N] = +(A[i, k] * B[k, j]);
              }"""
    value = tile.Operation(code, [('A', A), ('B', B)],
                           [('C', tile.Shape(A.shape.dtype, (2, 2)))],
                           name='MatMul') \
                .sole_output()
    reference = K.dot(A, B)
    npt.assert_allclose(value.eval(), reference.eval())
def testConv1D(self):
    I = K.variable(m(2, 8, 3))
    kernel = K.variable(m(3, 3, 2))
    code = """function (I[N, L, CI], K[LK, CI, CO]) -> (O) {
                  O[n, x, co: N, L - LK + 1, CO] = +(I[n, x + k, ci] * K[k, ci, co]);
              }"""
    value = tile.Operation(code, [('I', I), ('K', kernel)],
                           [('O', tile.Shape(I.shape.dtype, (2, 6, 2)))],
                           name='Conv1D') \
                .sole_output()
    reference = K.conv1d(I, kernel, padding='valid')
    npt.assert_allclose(value.eval(), reference.eval())
def testDilatedConv2D(self):
    I = K.variable(m(2, 6, 10, 3))
    kernel = K.variable(m(3, 2, 3, 2))
    code = """function (I[N, Lx, Ly, CI], K[LKx, LKy, CI, CO]) -> (O) {
                  O[n, x, y, co: N, Lx - 2 * (LKx - 1), Ly - 3 * (LKy - 1), CO] =
                      +(I[n, x + 2 * kx, y + 3 * ky, ci] * K[kx, ky, ci, co]);
              }"""
    value = tile.Operation(code, [('I', I), ('K', kernel)],
                           [('O', tile.Shape(I.shape.dtype, (2, 2, 7, 2)))],
                           name='DilatedConv2D') \
                .sole_output()
    reference = K.conv2d(I, kernel, padding='valid', dilation_rate=(2, 3))
    npt.assert_allclose(value.eval(), reference.eval())
def testGlobalMin(self):
    I = K.variable(np.array([[[1., 2., 3.], [4., 5., 6.]]]))
    code = """function (I) -> (O) {
                  Neg = -I;
                  O_Neg[] = >(Neg[i, j, k]);
                  O = -O_Neg;
              }"""
    value = tile.Operation(code, [('I', I)],
                           [('O', tile.Shape(I.shape.dtype, tuple()))],
                           name='GlobalMin') \
                .sole_output()
    reference = K.min(I, axis=[0, 1, 2])
    npt.assert_allclose(value.eval(), reference.eval())
def __init__(self, data, mode=None, pads=None, value=None):
    if value is None:
        value = 0.
    rank = data.shape.ndims
    in_dims = ['D{}'.format(d) for d in range(data.shape.ndims)]
    out_dims = list(in_dims)
    in_idxs = ['d{}'.format(d) for d in range(data.shape.ndims)]
    out_idxs = list(in_idxs)
    shape_dims = list(data.shape.dims)
    for idx in range(rank):
        start = pads[idx]
        end = pads[idx + rank]
        if start + end:
            out_dims[idx] = 'D{}+{}'.format(idx, start + end)
            shape_dims[idx] += start + end
        if start:
            out_idxs[idx] = 'd{}+{}'.format(idx, start)
    if value:
        # TODO: This is a somewhat inefficient way to write a padding operation.
        # Build a mask of ones over the input region, scatter it into the output
        # shape, and use its complement to select the fill value.
        code = """
            function (I[{in_dims}], One[], V[]) -> (O) {{
                Ones[{in_idxs} : {in_dims}] = =(One[]);
                InMask[{out_idxs} : {out_dims}] = =(Ones[{in_idxs}]);
                ValMask = 1 - InMask;
                Vals = ValMask * V;
                Ins[{out_idxs} : {out_dims}] = =(I[{in_idxs}]);
                O = Ins + Vals;
            }}""".format(in_dims=', '.join(in_dims),
                         out_dims=', '.join(out_dims),
                         in_idxs=', '.join(in_idxs),
                         out_idxs=', '.join(out_idxs))
        value_input = [('One', tile.Value.from_var(1, tuple())),
                       ('V', tile.Value.from_var(value, tuple()))]
    else:
        code = """
            function (I[{in_dims}]) -> (O) {{
                O[{out_idxs} : {out_dims}] = =(I[{in_idxs}]);
            }}""".format(in_dims=', '.join(in_dims),
                         out_dims=', '.join(out_dims),
                         in_idxs=', '.join(in_idxs),
                         out_idxs=', '.join(out_idxs))
        value_input = []
    outshape = tile.Shape(data.shape.dtype, shape_dims)
    super(PadConstant, self).__init__(code, [('I', data)] + value_input, [('O', outshape)])
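# Sketch: padding a 2x2 tensor by one element on every side with fill value 9.
# Per the ONNX convention, pads is ordered (begin_0, begin_1, end_0, end_1).
# np/K as in the Equal sketch above.
I = K.variable(np.array([[1., 2.], [3., 4.]]))
padded = PadConstant(I, pads=(1, 1, 1, 1), value=9.).sole_output()
print(padded.eval())  # expected: a 4x4 tensor of 9s with the original 2x2 block centered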
def __init__(self, tensors):
    tnames = ['I{}'.format(n) for n in range(len(tensors))]
    code = """
        function ({inputs}) -> (O) {{
            O = (({input_sum}) / {input_count});
        }}
    """.format(inputs=','.join(tnames),
               input_sum='+'.join(tnames),
               input_count=len(tnames))
    outshape = tile.Shape(
        tile.common_dtype(*[t.shape.dtype for t in tensors]),
        tile.broadcast_dims(*[t.shape.dims for t in tensors]))
    super(Mean, self).__init__(code, list(zip(tnames, tensors)), [('O', outshape)])
def __init__(self, value):
    """Initialize the Constant tensor operation.

    Args:
        value (onnx_pb2.TensorProto): The tensor to construct.
    """
    self.value = value
    try:
        outshape = tile.Shape(opset_util.ONNX_DTYPE_TO_PLAIDML[value.data_type],
                              value.dims)
    except KeyError:
        six.raise_from(
            NotImplementedError(
                'ONNX data type {} is not yet implemented by the PlaidML ONNX backend'.format(
                    onnx_pb2.TensorProto.DataType.Name(value.data_type))), None)
    super(Constant, self).__init__(None, [], [('O', outshape)])
def __init__(self, data, perm=None):
    if not perm:
        perm = range(data.shape.ndims - 1, -1, -1)
    ndims = data.shape.ndims
    code = """
        function (I[{in_dims}]) -> (O) {{
            O[{out_idxs} : {out_dims}] = =(I[{in_idxs}]);
        }}""".format(
        in_dims=', '.join(['D{}'.format(d) for d in range(ndims)]),
        out_dims=', '.join(['D{}'.format(perm[d]) for d in range(ndims)]),
        in_idxs=', '.join(['d{}'.format(d) for d in range(ndims)]),
        out_idxs=', '.join(['d{}'.format(perm[d]) for d in range(ndims)]))
    outshape = tile.Shape(data.shape.dtype,
                          [data.shape.dims[perm[d]] for d in range(ndims)])
    super(Transpose, self).__init__(code, [('I', data)], [('O', outshape)])
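# Sketch: with perm omitted, Transpose reverses the axis order, so a (2, 3)
# input becomes (3, 2). np/K as in the Equal sketch above.
I = K.variable(np.array([[1., 2., 3.], [4., 5., 6.]]))
T = Transpose(I).sole_output()
print(T.eval())  # expected: [[1., 4.], [2., 5.], [3., 6.]]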
def __init__(self, data, axis):
    in_dim_list = ['N{}'.format(i) for i in range(data.shape.ndims)]
    out_l_dim_list = ['*'.join(['1'] + ['N{}'.format(i) for i in range(axis)])]
    out_r_dim_list = ['*'.join(['1'] + ['N{}'.format(i)
                                        for i in range(axis, data.shape.ndims)])]
    in_dims = list(data.shape.dims)
    l_size = functools.reduce(lambda x, y: x * y, [1] + in_dims[:axis])
    r_size = functools.reduce(lambda x, y: x * y, [1] + in_dims[axis:])
    code = 'function (I[{idims}]) -> (O) {{ O = reshape(I, {o_l_dims}, {o_r_dims}); }}'.format(
        idims=', '.join(in_dim_list),
        o_l_dims=', '.join(out_l_dim_list),
        o_r_dims=', '.join(out_r_dim_list))
    super(Flatten, self).__init__(
        code, [('I', data)],
        [('O', tile.Shape(data.shape.dtype, (l_size, r_size)))])
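# Sketch: Flatten collapses all dims before `axis` into the first output dim
# and the rest into the second, so a (2, 3, 4) input flattened at axis=1
# becomes (2, 12). np/K as in the Equal sketch above.
I = K.variable(np.random.rand(2, 3, 4))
flat = Flatten(I, axis=1).sole_output()
print(flat.shape)  # expected: (2, 12)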
def __init__(self, frame, kernel, rules=((3,), (2, 3)), wrap=True):
    code = """
        function (I[Y, X, Z], K[KY, KX, Z], NULL, ONE, RGB) -> (O, ORGB) {{
            {conv};
            O = {rules};
            ORGB = RGB * O;
        }}
    """
    # TODO: we could simplify this
    # TODO: add other rules
    rule_map = {
        ((3,), (2, 3)):
            '( T < 12 ? (T == 3 ? ONE : NULL) : (T > 13 ? NULL : ONE) )',
        # labyrinthish
        ((3,), (2, 3, 4)):
            '( T < 12 ? (T == 3 ? ONE : NULL) : (T > 14 ? NULL : ONE) )',
    }
    if isinstance(rules, (list, tuple)):
        rules = rule_map[tuple(rules)]
    if wrap:
        # TODO:
        raise NotImplementedError("Wrap not implemented because no modulo")
    else:
        conv = ('T[y, x, z: Y, X, Z] = +(I[y - 1 + ky, x - 1 + kx, z] * K[ky, kx, z]), '
                'ky < KY, kx < KX')
    code = code.format(rules=rules, conv=conv)
    logger.debug("TILE CODE: %s:", code)
    rgb = np.zeros(frame.shape.dims[:-1] + (4,), dtype="uint8")
    rgb[:, :] = (255, 255, 255, 255)
    super(GolOp, self).__init__(code, [
        ('I', frame),
        ('K', kernel),
        ('NULL', K.constant(np.array(0, dtype='uint8'), dtype='uint8')),
        ('ONE', K.constant(np.array(1, dtype='uint8'), dtype='uint8')),
        ('RGB', K.variable(rgb, dtype='uint8')),
    ], [('O', frame.shape), ('ORGB', ptile.Shape(frame.shape.dtype, rgb.shape))])
def main(code, tensor_A, tensor_B, output_shape):
    print(K.backend())
    op = SandboxOp(code, tensor_A, tensor_B,
                   tile.Shape(plaidml.DType.FLOAT32, output_shape))
    print(op.sole_output().shape)
    print(op.sole_output().eval())
def __init__(self, a, b):
    # For matmul, we have identity dimensions (which remain the same in the
    # output tensor) and summation dimensions (which are eliminated in the
    # output tensor). We call these I{1,2,...} and S.
    #
    # The matrix multiplication and summation take place over the lowest two
    # dimensions. If either input is one-dimensional, that is its summation
    # dimension. Otherwise, A's summation dimension is its lowest dimension,
    # and B's summation dimension is its second-to-lowest.
    #
    # Naturally, there can be broadcasting involved; corresponding dimensions
    # must be broadcast-compatible.
    a_ndims = a.shape.ndims
    b_ndims = b.shape.ndims
    if a_ndims == 0 or b_ndims == 0:
        raise NotImplementedError('MatMul isn\'t defined over scalar values')
    if a_ndims == 1:
        if b_ndims == 1:
            # Both A and B are one-dimensional; C is a scalar.
            #   A's dims are [S]
            #   B's dims are [S]
            #   C's dims are []
            c_dims = tuple()
            a_ranges = ['S']
            a_indicies = ['s']
            b_ranges = ['S']
            b_indicies = ['s']
            c_ranges = []
            c_indicies = []
        else:
            # A is one-dimensional, but B is not:
            #   A's dims are [S]
            #   B's dims are [I0, I1... IN-3, S, IN-1]
            #   C's dims are [I0, I1... IN-3, IN-1]
            c_dims = tuple(b.shape.dims[:-2]) + (b.shape.dims[-1],)
            a_ranges = ['S']
            a_indicies = ['s']
            b_ranges = (['I{}'.format(n) for n in range(b_ndims - 2)] +
                        ['S', 'I{}'.format(b_ndims - 1)])
            b_indicies = (['i{}'.format(n) for n in range(b_ndims - 2)] +
                          ['s', 'i{}'.format(b_ndims - 1)])
            c_ranges = ['I{}'.format(n)
                        for n in list(range(b_ndims - 2)) + [b_ndims - 1]]
            c_indicies = ['i{}'.format(n)
                          for n in list(range(b_ndims - 2)) + [b_ndims - 1]]
    else:
        if b_ndims == 1:
            # B is one-dimensional, but A is not:
            #   A's dims are [I0, I1... IN-3, IN-2, S]
            #   B's dims are [S]
            #   C's dims are [I0, I1... IN-3, IN-2]
            c_dims = tuple(a.shape.dims[:-1])
            a_ranges = ['I{}'.format(n) for n in range(a_ndims - 1)] + ['S']
            a_indicies = ['i{}'.format(n) for n in range(a_ndims - 1)] + ['s']
            b_ranges = ['S']
            b_indicies = ['s']
            c_ranges = ['I{}'.format(n) for n in range(a_ndims - 1)]
            c_indicies = ['i{}'.format(n) for n in range(a_ndims - 1)]
        else:
            # Both tensors have more than one dimension.
            #   A's dims are [I0, I1... IN-3, IN-2, S]
            #   B's dims are [I0, I1... IN-3, S, IN-1]
            #   C's dims are [I0, I1... IN-3, IN-2, IN-1]
            c_dims = tuple(
                list(tile.broadcast_dims(a.shape.dims[:-2], b.shape.dims[:-2])) +
                [a.shape.dims[-2], b.shape.dims[-1]])
            a_ranges = ['I{}'.format(n) for n in range(a_ndims - 1)] + ['S']
            a_indicies = ['i{}'.format(n) for n in range(a_ndims - 1)] + ['s']
            b_ranges = (['I{}'.format(n) for n in range(b_ndims - 2)] +
                        ['S', 'I{}'.format(b_ndims - 1)])
            b_indicies = (['i{}'.format(n) for n in range(b_ndims - 2)] +
                          ['s', 'i{}'.format(b_ndims - 1)])
            c_ranges = ['I{}'.format(n) for n in range(len(c_dims))]
            c_indicies = ['i{}'.format(n) for n in range(len(c_dims))]

    func = """function(A[{a_ranges}], B[{b_ranges}]) -> (C) {{
                  C[{c_indicies} : {c_ranges}] = +(A[{a_indicies}] * B[{b_indicies}]);
              }}""".format(a_ranges=', '.join(a_ranges),
                           a_indicies=', '.join(a_indicies),
                           b_ranges=', '.join(b_ranges),
                           b_indicies=', '.join(b_indicies),
                           c_ranges=', '.join(c_ranges),
                           c_indicies=', '.join(c_indicies))

    c_shape = tile.Shape(tile.common_dtype(a.shape.dtype, b.shape.dtype), c_dims)

    super(MatMul, self).__init__(func, [('A', a), ('B', b)], [('C', c_shape)])
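# Sketch of the dimension rules documented above: two batched stacks of
# matrices, (2, 3, 4) and (2, 4, 5), contract over S=4 to give a (2, 3, 5)
# result. np/K as in the Equal sketch above.
A = K.variable(np.random.rand(2, 3, 4))
B = K.variable(np.random.rand(2, 4, 5))
C = MatMul(A, B).sole_output()
print(C.shape)  # expected: (2, 3, 5)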
def __init__(self, x, dims):
    super(Reshape, self).__init__(
        'function (I) -> (O) {{ O = reshape(I, {}); }}'.format(
            ', '.join([str(d) for d in dims])),
        [('I', x)], [('O', tile.Shape(x.shape.dtype, dims))])
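# Sketch: reshaping a (2, 6) tensor to (3, 4); the target dims are formatted
# into the Tile code as literals. np/K as in the Equal sketch above.
I = K.variable(np.random.rand(2, 6))
R = Reshape(I, (3, 4)).sole_output()
print(R.shape)  # expected: (3, 4)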