def kvps_to_list(kvps):
    """Convert a plaidml_kvps FFI object into a list of (key, value) string pairs.

    The native structure is always released via plaidml_kvps_free, even if
    decoding raises.
    """
    try:
        entries = kvps.elts
        pairs = []
        for idx in range(kvps.size):
            pairs.append((decode_str(entries[idx].key), decode_str(entries[idx].value)))
        return pairs
    finally:
        ffi_call(lib.plaidml_kvps_free, kvps)
def _wrap_poly(x):
    """Coerce a plain integer or TensorDim into a TensorIndex; pass other values through."""
    if isinstance(x, TensorDim):
        return TensorIndex(expr=ffi_call(lib.plaidml_poly_expr_dim, x.as_ptr()))
    if isinstance(x, six.integer_types):
        return TensorIndex(expr=ffi_call(lib.plaidml_poly_expr_literal, x))
    return x
def kvps_to_dict(kvps):
    """Convert a plaidml_kvps FFI object into a {key: value} dict of strings.

    The native structure is always released via plaidml_kvps_free.
    """
    try:
        elts = kvps.elts
        result = {}
        for i in range(kvps.size):
            result[decode_str(elts[i].key)] = decode_str(elts[i].value)
        return result
    finally:
        ffi_call(lib.plaidml_kvps_free, kvps)
def __init():
    """Initialize the PlaidML Core API.

    Verifies that the loaded native library reports the same version as this
    Python package (PLAIDML_VERSION).

    Returns:
        str: PLAIDML_VERSION on success.

    Raises:
        EnvironmentError: If the native and Python versions disagree.
    """
    ffi_call(lib.plaidml_init)
    lib_version = ffi.string(ffi_call(lib.plaidml_version)).decode()
    if lib_version == PLAIDML_VERSION:
        return PLAIDML_VERSION
    raise EnvironmentError(
        'Version mismatch. plaidml (python): {}, {} (C++): {}'.format(
            PLAIDML_VERSION, lib.lib_name, lib_version))
def __init__(self, shape=None, ptr=None, data=None):
    """Wrap an existing native buffer, adopt caller-provided data, or allocate anew."""
    self._ndarray = None
    if data is not None:
        # Hold a reference to the backing object so the adopted memory
        # stays alive for the lifetime of this Buffer.
        self.__data = data
        cdata = ffi.from_buffer(data)
        raw = ffi_call(lib.plaidml_buffer_adopt, shape.as_ptr(), cdata, len(cdata))
    elif ptr:
        raw = ptr
    else:
        raw = ffi_call(lib.plaidml_buffer_alloc, shape.as_ptr())
    super(Buffer, self).__init__(raw)
def __init__(self, expr=None, value=None, lens=TensorLens()):
    """Build a Tensor from a raw expression pointer or a Python int/float value.

    Exactly one of expr= or value= must be provided.
    """
    self._lens = lens
    if value is None:
        if expr is None:
            raise ValueError('One of expr= or value= must be specified.')
    elif isinstance(value, six.integer_types):
        expr = ffi_call(lib.plaidml_expr_int, value)
    elif isinstance(value, float):
        expr = ffi_call(lib.plaidml_expr_float, value)
    else:
        raise TypeError('Invalid type for value={}'.format(value))
    super(Tensor, self).__init__(expr)
def __init__(self, value):
    """Wrap a Python value as a native plaidml_value.

    Accepts None, bool/int, enum.IntEnum, float, TensorDim, Tensor,
    list/tuple (elements wrapped recursively), str, or a raw
    ``plaidml_value*`` CData pointer (adopted as-is).

    Raises:
        TypeError: For any unsupported value type.
    """
    # logger.debug('Value({})'.format(value))
    if isinstance(value, np.ndarray):
        # Collapse numpy values to plain Python first so the branches
        # below apply: 0-d arrays become scalars, others become lists.
        if value.ndim == 0:
            value = value.item()
        else:
            value = value.tolist()
    if value is None:
        ffi_obj = ffi_call(lib.plaidml_value_none)
    elif isinstance(value, enum.IntEnum):
        # Checked before the generic integer branch so enum members are
        # unwrapped via .value rather than passed as objects.
        ffi_obj = ffi_call(lib.plaidml_value_int, value.value)
    elif isinstance(value, (six.integer_types, bool)):
        ffi_obj = ffi_call(lib.plaidml_value_int, value)
    elif isinstance(value, float):
        ffi_obj = ffi_call(lib.plaidml_value_float, value)
    elif isinstance(value, TensorDim):
        ffi_obj = ffi_call(lib.plaidml_value_dim, value.as_ptr())
    elif isinstance(value, Tensor):
        ffi_obj = ffi_call(lib.plaidml_value_expr, value.as_ptr())
    elif isinstance(value, (list, tuple)):
        # Keep wrapped elements alive on self so their FFI pointers stay
        # valid for the lifetime of this Value.
        self._elts = [Value(x) for x in value]
        raw_elts = [x.as_ptr() for x in self._elts]
        ffi_obj = ffi_call(lib.plaidml_value_tuple, len(raw_elts), raw_elts)
    elif isinstance(value, six.string_types):
        ffi_obj = ffi_call(lib.plaidml_value_str, value.encode())
    elif isinstance(value, ffi.CData) and ffi.typeof(value) is ffi.typeof(
            'plaidml_value*'):
        ffi_obj = value
    else:
        raise TypeError('Unsupported type {} for value={}'.format(
            type(value), value))
    super(Value, self).__init__(ffi_obj)
def _wrap_tensor(x):
    """Coerce x into a Tensor.

    Accepts Python ints, numpy integer scalars, floats, TensorDim, or an
    existing Tensor (returned unchanged).

    Raises:
        TypeError: If x is of an unsupported type.
    """
    if isinstance(x, six.integer_types):
        return Tensor(expr=ffi_call(lib.plaidml_expr_int, x))
    if np.issubdtype(type(x), np.integer):
        return Tensor(expr=ffi_call(lib.plaidml_expr_int, x.item()))
    if isinstance(x, float):
        return Tensor(expr=ffi_call(lib.plaidml_expr_float, x))
    if isinstance(x, TensorDim):
        return Tensor(expr=ffi_call(lib.plaidml_expr_dim, x.as_ptr()))
    if isinstance(x, Tensor):
        return x
    # BUG FIX: the previous message formatted `fn` and `args`, names that do
    # not exist in this scope, so the error path raised NameError instead of
    # the intended TypeError.
    raise TypeError('Unexpected type for call argument: {}, bad arg: {}'.format(type(x), x))
def cast(x, dtype):
    """Casts the element type of a tensor ``x`` to the type specified by ``dtype``.

    Args:
        x (Tensor): The tensor used to perform the elementwise ``cast``.
        dtype (DType): The datatype to ``cast`` to.

    Returns:
        Tensor: The result of the elementwise ``cast`` operation.
    """
    wrapped = _wrap_tensor(x)
    expr = ffi_call(lib.plaidml_expr_cast, wrapped.as_ptr(), dtype)
    return Tensor(expr=expr)
def Constant(buffer, dims=None, name=''):
    """Creates a tensor with constant values.

    Args:
        buffer (Buffer): A Buffer that stores the values of the ``Constant``.
        dims (list, optional): Currently unused; the shape comes entirely
            from ``buffer``. Retained for backward compatibility.
        name (string, optional): A name to be assigned to the ``Tensor``.

    Returns:
        Tensor: The constant ``Tensor``.
    """
    # NOTE(review): `dims` was a shared mutable default ([]) and is never
    # consulted in this body; replaced with a None sentinel.
    return Tensor(expr=ffi_call(lib.plaidml_expr_constant, buffer.as_ptr(), name.encode()))
def __init__(self, dtype=None, sizes=None, strides=None, ptr=None):
    """Construct a TensorShape from an existing pointer or dtype + sizes/strides.

    Args:
        dtype: Element data type; required unless ``ptr`` is given.
        sizes: Iterable of dimension sizes. ``None`` entries are encoded
            as 0 for the native call.
        strides: Optional iterable of strides; empty/None selects the
            native default layout (NULL strides).
        ptr: An existing plaidml_shape pointer to adopt.

    Raises:
        ValueError: If neither dtype= nor ptr= is specified.
    """
    if ptr:
        ffi_obj = ptr
    elif dtype is not None:
        # Mutable default args ([]) replaced with None sentinels;
        # behavior for all existing callers is unchanged.
        sizes = [] if sizes is None else sizes
        raw_sizes = ffi.new('int64_t[]', [0 if x is None else x for x in sizes])
        if strides:
            raw_strides = ffi.new('int64_t[]', strides)
        else:
            raw_strides = ffi.NULL
        ffi_obj = ffi_call(lib.plaidml_shape_alloc, dtype, len(sizes), raw_sizes,
                           raw_strides)
    else:
        raise ValueError('One of dtype= or ptr= must be specified.')
    super(TensorShape, self).__init__(ffi_obj)
def pragma(tensor, op, attrs):
    """Attach pragma ``op`` with the given attributes to ``tensor``.

    Args:
        tensor: A Tensor (or any value coercible by _wrap_tensor).
        op (str): The pragma operation name.
        attrs (dict): Mapping of attribute name -> value (wrapped via
            _wrap_value).

    Returns:
        Tensor: The resulting tensor expression.
    """
    tensor = _wrap_tensor(tensor)
    # `keys` and `values` exist purely as keep-alives: they hold the
    # ffi-allocated key buffers and wrapped values so the pointers stored
    # in raw_attrs remain valid until the ffi_call below completes.
    keys = []
    values = []
    raw_attrs = []
    for key, value in attrs.items():
        key = ffi.new('char[]', key.encode())
        keys.append(key)
        value = _wrap_value(value)
        values.append(value)
        raw_attrs.append(
            ffi.new('plaidml_attr*', {
                'key': key,
                'value': value.as_ptr()
            }))
    return Tensor(expr=ffi_call(lib.plaidml_expr_pragma, tensor.as_ptr(), op.encode(),
                                len(raw_attrs), raw_attrs))
def __init__(self, name, inputs, outputs, shapes=None):
    """Build a native program from input/output tensor expressions.

    `shapes`, when given, supplies explicit output shapes; otherwise the
    native builder receives NULL.
    """
    raw_inputs = [tensor.as_ptr() for tensor in inputs]
    raw_outputs = [tensor.as_ptr() for tensor in outputs]
    raw_shapes = [shape.as_ptr() for shape in shapes] if shapes else ffi.NULL
    ffi_obj = ffi_call(
        lib.plaidml_build,
        name.encode(),
        len(raw_inputs),
        raw_inputs,
        raw_shapes,
        len(raw_outputs),
        raw_outputs,
    )
    super(Program, self).__init__(ffi_obj)
def Placeholder(dtype_or_shape, dims=None, name=''):
    """Creates a placeholder tensor.

    Args:
        dtype_or_shape (DType | TensorShape): A data type or a shape can be
            specified. If a shape is specified, the `dims` parameter is ignored.
        dims (list, optional): Specifies the dimensions of the ``Placeholder``.
        name (string, optional): A name to be assigned to the ``Tensor``.

    Returns:
        Tensor: The placeholder ``Tensor``.

    Raises:
        TypeError: If dtype_or_shape is neither a DType nor a TensorShape.
    """
    if isinstance(dtype_or_shape, TensorShape):
        shape = dtype_or_shape
    elif isinstance(dtype_or_shape, DType):
        # Mutable default arg ([]) replaced with a None sentinel.
        shape = TensorShape(dtype=dtype_or_shape, sizes=[] if dims is None else dims)
    else:
        raise TypeError('Unsupported type {} for dtype_or_shape={}'.format(
            type(dtype_or_shape), dtype_or_shape))
    return Tensor(
        expr=ffi_call(lib.plaidml_expr_input, shape.as_ptr(), name.encode()))
def get(key):
    """Return the PlaidML setting named `key`; raise EnvironmentError if unset."""
    value = decode_str(ffi_call(lib.plaidml_settings_get, key.encode()))
    if value is not None:
        return value
    raise EnvironmentError('Could not find setting: {}'.format(key))
def __init__(self, expr=None, name=''):
    """TensorIndex constructor: adopt `expr`, or create a fresh named index."""
    raw = expr if expr is not None else ffi_call(lib.plaidml_poly_expr_index, name.encode())
    super(TensorIndex, self).__init__(raw)
def _wrap_dim(x):
    """Promote a plain integer to a TensorDim; other values pass through unchanged."""
    if not isinstance(x, six.integer_types):
        return x
    return TensorDim(expr=ffi_call(lib.plaidml_dim_expr_int, x))
def __shutdown():
    # Tear down the PlaidML Core API (counterpart to __init).
    ffi_call(lib.plaidml_shutdown)
def _dim_op(op, *args):
    """Build a dim-expression operation node from `op` and int/TensorDim operands."""
    wrapped = [_wrap_dim(arg) for arg in args]
    raw_ptrs = [dim.as_ptr() for dim in wrapped]
    return ffi_call(lib.plaidml_dim_expr_op, op, len(wrapped), raw_ptrs)
def __init__(self, expr=None):
    """TensorDim constructor: adopt `expr`, or create an unbound dimension."""
    raw = ffi_call(lib.plaidml_dim_expr_none) if expr is None else expr
    super(TensorDim, self).__init__(raw)
def __init__(self, program, device=''):
    """JIT-compile `program` for `device` and wrap the native executable."""
    ffi_obj = ffi_call(lib.plaidml_jit, program.as_ptr(), device.encode())
    super(Executable, self).__init__(ffi_obj)
def __init():
    """ Initializes the PlaidML Execution API. """
    # One-time native setup for the exec module; no return value.
    ffi_call(lib.plaidml_exec_init)
def load():
    # Load settings from persistent storage, then return them as a dict.
    ffi_call(lib.plaidml_settings_load)
    # `all` is this settings module's all(), which shadows the builtin.
    return all()
def set(key, value):
    # Store the string `value` under setting `key`. NOTE: shadows builtin set().
    ffi_call(lib.plaidml_settings_set, key.encode(), value.encode())
def build(self):
    """Finalize this contraction and return the resulting Tensor.

    Normalizes the RHS into an IndexedTensor, creates the native
    contraction expression from the output dims/indices, registers each
    operand and constraint, and then builds the contraction.
    """
    # Normalize the right-hand side: plain Tensors and scalar values are
    # wrapped as a no-combo IndexedTensor with no indices.
    if isinstance(self.__rhs, IndexedTensor):
        rhs = self.__rhs
    elif isinstance(self.__rhs, Tensor):
        rhs = IndexedTensor(lib.PLAIDML_COMBO_OP_NONE, ref=self.__rhs, idxs=())
    else:
        tensor = Tensor(value=self.__rhs)
        rhs = IndexedTensor(lib.PLAIDML_COMBO_OP_NONE, ref=tensor, idxs=())

    def make_list(idxs):
        # Accept a single index as well as a tuple/list of indices.
        if isinstance(idxs, tuple) or isinstance(idxs, list):
            return idxs
        return [idxs]

    # Output dims and indices are wrapped and then reordered through this
    # contraction's lens before being handed to the native builder.
    dims = [_wrap_dim(x) for x in self.__outDims]
    raw_dims = [x.as_ptr() for x in self.__lens.apply(dims)]
    idxs = [_wrap_poly(x) for x in make_list(self.__outIdxs)]
    raw_idxs = [x.as_ptr() for x in self.__lens.apply(idxs)]
    # Optional init tensor (NULL when absent).
    init = ffi.NULL
    if self.__init:
        init = self.__init.as_ptr()
    tensor = Tensor(expr=ffi_call(
        lib.plaidml_expr_contraction,
        self.__agg_op,
        rhs._op,
        len(raw_idxs),
        raw_idxs,
        raw_dims,
        init,
        self.__name.encode(),
    ))
    # A bare (COMBO_OP_NONE) rhs is itself the single operand; a combined
    # rhs contributes each of its argument terms.
    if rhs._op == lib.PLAIDML_COMBO_OP_NONE:
        operands = [rhs]
    else:
        operands = rhs._args
    for operand in operands:
        # Each operand's indices go through its own tensor's lens.
        idxs = [_wrap_poly(x) for x in make_list(operand._idxs)]
        raw_idxs = [x.as_ptr() for x in operand._ref._lens.apply(idxs)]
        ffi_call(
            lib.plaidml_contraction_add_operand,
            tensor.as_ptr(),
            operand._ref.as_ptr(),
            len(raw_idxs),
            raw_idxs,
        )
    for constraint in self.__constraints:
        ffi_call(
            lib.plaidml_contraction_add_constraint,
            tensor.as_ptr(),
            constraint.lhs.as_ptr(),
            constraint.rhs.as_ptr(),
        )
    # All operands/constraints registered; finalize the native expression.
    ffi_call(lib.plaidml_contraction_build, tensor.as_ptr())
    return tensor
def save():
    # Persist the current settings to storage.
    ffi_call(lib.plaidml_settings_save)
def intrinsic(fn, *args):
    """Build an intrinsic expression node named `fn` over tensor-coercible args."""
    tensors = [_wrap_tensor(arg) for arg in args]
    raw_ptrs = [t.as_ptr() for t in tensors]
    return Tensor(expr=ffi_call(lib.plaidml_expr_intrinsic, fn.encode(), len(tensors),
                                raw_ptrs))
def all():
    # Return every setting as a {name: value} dict. NOTE: shadows builtin all().
    return plaidml.kvps_to_dict(ffi_call(lib.plaidml_settings_list))
def op(op_name, args):
    """Invoke the named op-library operation with `args` packed as an edsl.Value."""
    packed = edsl.Value(args)
    result = ffi_call(lib.plaidml_op_make, op_name.encode(), packed.as_ptr())
    return edsl.Value(result)