def _conv2d(input,
            filter,
            bias=None,
            strides=[1, 1],
            pads=[1, 1, 1, 1],
            dilations=[1, 1],
            group=1,
            debugContext=''):
    """Wrapper around get_builder().aiOnnx.conv.

    args:
        input: input tensor (NCHW)
        filter: filter tensor (OIHW)
        bias: optional bias tensor, appended to the op inputs when given
        strides: list of int, stride per spatial dim
        pads: list of int, padding as [top, left, bottom, right]
        dilations: list of int, dilation per spatial dim
        group: int, number of convolution groups, default: 1
    """
    args = [input.getIpuIndex(), filter.getIpuIndex()]
    if bias:
        args.append(bias.getIpuIndex())
    output = get_builder().aiOnnx.conv(args,
                                       strides=strides,
                                       pads=pads,
                                       dilations=dilations,
                                       group=group,
                                       debugContext=debugContext)
    if get_memory_proportion() is not None:
        get_builder().setAvailableMemoryProportion(output,
                                                   get_memory_proportion())
    return TTensor(output)

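# A minimal sketch (hypothetical `_demo_` helper, not part of the module API):
# the spatial output size of _conv2d follows the standard ONNX convolution
# arithmetic, shown here for the default 3x3/stride-1/pad-1 configuration.
def _demo_conv_output_size():
    in_size, k, stride, pad, dilation = 224, 3, 1, 1, 1
    # out = floor((in + 2 * pad - dilation * (k - 1) - 1) / stride) + 1
    out = (in_size + 2 * pad - dilation * (k - 1) - 1) // stride + 1
    assert out == 224  # this configuration preserves the spatial size
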
def clip(x, minimum=-np.inf, maximum=np.inf, debugContext=""):
    if get_ai_onnx_version() >= 11:
        # opset >= 11 takes the bounds as extra input tensors
        minimum = constant(np.asarray(minimum).astype(np.float32))
        maximum = constant(np.asarray(maximum).astype(np.float32))
        return TTensor(get_builder().aiOnnx.clip(
            [x.getIpuIndex(), minimum.getIpuIndex(), maximum.getIpuIndex()],
            debugContext=debugContext))
    else:
        # older opsets take the bounds as float attributes
        return TTensor(get_builder().aiOnnx.clip([x.getIpuIndex()],
                                                 min=float(minimum),
                                                 max=float(maximum),
                                                 debugContext=debugContext))

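# A minimal sketch (hypothetical helper): whichever opset branch is taken,
# clip computes the same thing as np.clip does on the host.
def _demo_clip_semantics():
    x = np.array([-2.0, 0.5, 3.0], dtype=np.float32)
    assert np.clip(x, -1.0, 1.0).tolist() == [-1.0, 0.5, 1.0]
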
def _batchNorm(
        x,
        scale,
        biases,
        mean,
        var,
        num_outputs=1,
        momentum=0.9,
        epsilon=1e-5,
        debugContext="",
):
    results = get_builder().aiOnnx.batchnormalization(
        [
            x.getIpuIndex(),
            scale.getIpuIndex(),
            biases.getIpuIndex(),
            mean.getIpuIndex(),
            var.getIpuIndex()
        ],
        num_outputs=num_outputs,
        epsilon=epsilon,
        momentum=momentum,
        debugContext=debugContext)
    results = results[0] if num_outputs == 1 else results
    if isinstance(results, list):
        results = [TTensor(r) for r in results]
    else:
        results = [TTensor(results)]
    return results

def softmax_2d(x, axis=1, debugContext=""):
    assert axis in [-1, 1]
    assert x.shape.ndims == 2
    x = get_builder().aiOnnx.softmax([x.getIpuIndex()],
                                     axis=axis,
                                     debugContext=debugContext)
    return TTensor(x)

def reshape(source, target_shape, debugContext=""):
    """
    args:
        source: the tensor to reshape
        target_shape: list of int, e.g.: [3, 4, 5, 6]
    """
    if isinstance(target_shape, TTensor):
        target_shape = target_shape.data
    if isinstance(target_shape, np.ndarray):
        target_shape = target_shape.tolist()
    if isinstance(target_shape, list):
        target_shape = [scalarTensor2int(ele) for ele in target_shape]
        target_shape = constant(np.array(target_shape).astype(np.int64),
                                debugContext=debugContext)
    if check_all_constant([source, target_shape]):
        # degrade to np op
        result = source.data.reshape(target_shape.data)
        return constant(result)
    else:
        return TTensor(get_builder().aiOnnx.reshape(
            [source.getIpuIndex(), target_shape.getIpuIndex()],
            debugContext=debugContext))

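# A minimal sketch (hypothetical helper): when every input is a compile-time
# constant, reshape above folds to a plain numpy reshape instead of emitting
# an ONNX Reshape node; both paths produce the same values and shape.
def _demo_reshape_folding():
    folded = np.arange(12, dtype=np.float32).reshape([3, 4])
    assert folded.shape == (3, 4)
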
def one_hot(indices, depth, values=None, debugContext=''):
    '''
    values: [off_value, on_value]
    if an index is -1, the corresponding row is [0] * depth
    '''
    if isinstance(depth, int):
        depth = to_tensor(depth, dtype='INT64')
    if values is None:
        values = constant(np.asarray([0, 1]).astype(np.int32),
                          debugContext=debugContext)
    int_types = ['INT32', 'INT64', 'INT16', 'UINT32', 'UINT64', 'UINT16']
    assert indices.dtype in int_types
    assert depth.dtype in int_types
    assert values.dtype in int_types
    result = get_builder().aiOnnx.onehot(
        [indices.getIpuIndex(), depth.getIpuIndex(), values.getIpuIndex()],
        debugContext=debugContext)
    result = TTensor(result)
    result_shape = list(result.pureShape)
    if result_shape[1] == 0:
        # the builder could not infer the class dimension; fill it in
        result_shape[1] = depth
        result = result.reshape(result_shape)
    return result

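# A minimal sketch (hypothetical helper) of the one-hot semantics above in
# plain numpy, with the default values tensor [off_value, on_value] = [0, 1].
def _demo_one_hot_semantics():
    indices, depth = np.array([0, 2]), 3
    expected = np.eye(depth, dtype=np.int32)[indices]
    assert expected.tolist() == [[1, 0, 0], [0, 0, 1]]
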
def tile(input, repeats, debugContext=""):
    if check_all_constant([input, repeats]):
        # degrade to np op
        result = np.tile(input.data, repeats.data)
        return constant(result)
    result = get_builder().aiOnnx.tile(
        [input.getIpuIndex(), repeats.getIpuIndex()],
        debugContext=debugContext)
    return TTensor(result)

def real_init(self):
    assert not self.initialized
    self.data = np.ascontiguousarray(self.data.copy())
    name = get_builder().aiOnnx.constant(self.data)
    super().__init__(name)
    self.__name = name  # a private attribute cannot be inherited
    self.initialized = True

def flatten(x):
    '''Flatten to 1-D, like np.ndarray.flatten.'''
    if check_all_constant([x]):
        # degrade to np op
        return constant(x.data.flatten())
    x = get_builder().aiOnnx.flatten([x.getIpuIndex()], 0)
    # flatten with axis=0 yields shape (1, N); squeeze back to (N,)
    return TTensor(x).squeeze(0)

def min(tensor_list, debugContext=""):
    if check_all_constant(tensor_list):
        # degrade to np op: ONNX Min is an element-wise minimum, so reduce
        # with np.minimum rather than taking np.min over everything
        arr_list = [t.data for t in tensor_list]
        result = np.minimum.reduce(arr_list)
        return constant(result)
    return TTensor(get_builder().aiOnnx.min(
        [t.getIpuIndex() for t in tensor_list], debugContext=debugContext))

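# A minimal sketch (hypothetical helper): ONNX Min is an element-wise minimum
# across its inputs, which is why the constant-folding path above reduces with
# np.minimum rather than np.min.
def _demo_elementwise_min():
    a, b = np.array([1.0, 5.0]), np.array([2.0, 3.0])
    assert np.minimum.reduce([a, b]).tolist() == [1.0, 3.0]
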
def reduceMean(x, axes, keepdims=False, debugContext=''):
    if isinstance(axes, int):
        axes = [axes]
    assert isinstance(axes, list) or axes is None
    return TTensor(get_builder().aiOnnx.reducemean([x.getIpuIndex()],
                                                   axes=axes,
                                                   keepdims=keepdims,
                                                   debugContext=debugContext))

def sub(tensors, debugContext=""):
    assert len(tensors) == 2
    if check_all_constant(tensors):
        # degrade to np op
        result = tensors[0].data - tensors[1].data
        return constant(result, debugContext=debugContext)
    return TTensor(get_builder().aiOnnx.sub(
        [t.getIpuIndex() for t in tensors], debugContext=debugContext))

def add(tensors, debugContext=""):
    if check_all_constant(tensors):
        # degrade to np op
        result = 0
        for t in tensors:
            result = result + t.data
        return constant(result, debugContext=debugContext)
    return TTensor(get_builder().aiOnnx.add(
        [t.getIpuIndex() for t in tensors], debugContext=debugContext))

def transpose(x, dim_order, debugContext=""):
    """dim_order: list of int, e.g. [0, 2, 3, 1]"""
    if check_all_constant([x]):
        # degrade to np op
        result = np.transpose(x.data, dim_order)
        return constant(result)
    return TTensor(get_builder().aiOnnx.transpose([x.getIpuIndex()],
                                                  dim_order,
                                                  debugContext=debugContext))

def mul(tensors, debugContext=""):
    if check_all_constant(tensors):
        # degrade to np op
        result = 1
        for t in tensors:
            result = t.data * result
        return constant(result, debugContext=debugContext)
    return TTensor(get_builder().aiOnnx.mul(
        [t.getIpuIndex() for t in tensors], debugContext=debugContext))

def matmul(x, y, debugContext=""):
    if check_all_constant([x, y]):
        # degrade to np op
        result = np.matmul(x.data, y.data)
        return constant(result)
    else:
        assert x.dtype in ['FLOAT', 'FLOAT16']
        assert y.dtype in ['FLOAT', 'FLOAT16']
        return TTensor(get_builder().aiOnnx.matmul(
            [x.getIpuIndex(), y.getIpuIndex()], debugContext=debugContext))

def _concat(tensor_list, dim, debugContext=""):
    if check_all_constant(tensor_list):
        # degrade to np op
        np_arr_list = [t.data for t in tensor_list]
        result = np.concatenate(np_arr_list, axis=dim)
        return constant(result)
    return TTensor(get_builder().aiOnnx.concat(
        [tensor.getIpuIndex() for tensor in tensor_list],
        dim,
        debugContext=debugContext))

def pad(data, pads, mode='constant', constant_value=0, debugContext=''):
    constant_value = constant(constant_value).cast(data.dtype.upper())
    pads = to_tensor(pads).cast('INT64').flatten()
    result = get_builder().aiOnnx.pad(
        [data.getIpuIndex(), pads.getIpuIndex(), constant_value.getIpuIndex()],
        mode=mode,
        debugContext=debugContext)
    return TTensor(result)

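# A minimal sketch (hypothetical helper): ONNX Pad takes one flattened pads
# vector laid out as [x1_begin, x2_begin, ..., x1_end, x2_end, ...], unlike
# np.pad's per-axis (begin, end) pairs.
def _demo_pad_layout():
    x = np.ones((2, 2), dtype=np.float32)
    # ONNX pads [1, 0, 1, 0]: one row before and after axis 0, none on axis 1
    np_pads = [(1, 1), (0, 0)]  # the same padding in np.pad's layout
    assert np.pad(x, np_pads, constant_values=0).shape == (4, 2)
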
def reduceprod(x, dim, keepdims=False, debugContext=""):
    """
    args:
        dim: int, the dimension to take the product over
    """
    x = get_builder().aiOnnx.reduceprod([x.getIpuIndex()],
                                        axes=[dim],
                                        keepdims=keepdims,
                                        debugContext=debugContext)
    return TTensor(x)

def relu(x, debugContext=""):
    """
    args:
        x: input tensor, or a list of input tensors
    """
    if isinstance(x, list):
        x = [ele.getIpuIndex() for ele in x]
    else:
        x = [x.getIpuIndex()]
    x = get_builder().aiOnnx.relu(x, debugContext=debugContext)
    return TTensor(x)

def split(x, lenOfSplit, dim, debugContext=""):
    """
    args:
        lenOfSplit: lengths of the output pieces along dim, e.g. (4, 1)
            splits into two pieces: one of length 4, the other of length 1
    """
    results = get_builder().aiOnnx.split([x.getIpuIndex()],
                                         len(lenOfSplit),
                                         dim,
                                         lenOfSplit,
                                         debugContext=debugContext)
    # the builder returns one tensor name per piece
    return [TTensor(r) for r in results]

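# A minimal sketch (hypothetical helper): lenOfSplit lists the length of each
# output piece along dim, matching numpy's split-by-indices below.
def _demo_split_semantics():
    a, b = np.split(np.arange(5), [4])  # pieces of length 4 and 1
    assert a.tolist() == [0, 1, 2, 3] and b.tolist() == [4]
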
def addInitializedInputTensor(array, debugContext=""):
    """
    args:
        array: a numpy array that will be copied to the IPU
    return:
        TTensor wrapping the new tensor's name
    """
    name = get_builder().addInitializedInputTensor(array,
                                                   debugContext=debugContext)
    return TTensor(name)

def nllloss(prob,
            label,
            reductionType=popart.ReductionType.Mean,
            debugPrefix=''):
    # prob: scaled probabilities, [batch, classes], float
    # label: labels, [batch], int32
    with name_scope(debugPrefix):
        loss = get_builder().aiGraphcore.nllloss(
            [prob.getIpuIndex(), label.getIpuIndex()],
            reductionType,
            debugContext=debugPrefix)
    return TTensor(loss)

def cast(x, target_type='FLOAT', debugContext=''):
    """
    target_type: FLOAT|FLOAT16|INT8|INT16|INT32|UINT8|UINT16|UINT32|BOOL
    """
    target_type = 'FLOAT' if target_type == 'FLOAT32' else target_type
    if check_all_constant([x]):
        # degrade to np op
        data = x.data.astype(mappin_gc2npy[target_type])
        return constant(data)
    else:
        return TTensor(get_builder().aiOnnx.cast([x.getIpuIndex()],
                                                 target_type.upper(),
                                                 debugContext))

def unsqueeze(x, dims, debugContext=""):
    """
    args:
        dims: list of int, the axes where new dimensions are inserted,
            e.g. [3] or [1, 3]
    """
    if check_all_constant([x]):
        # degrade to np op
        result = np.expand_dims(x.data, axis=dims)
        return constant(result)
    x = get_builder().aiOnnx.unsqueeze([x.getIpuIndex()],
                                       axes=dims,
                                       debugContext=debugContext)
    return TTensor(x)

def avgPooling(x,
               strides=2,
               kernel_size=2,
               padding=0,
               count_include_pad=0,
               debugContext=""):
    x = get_builder().aiOnnx.averagepool(
        [x.getIpuIndex()],
        kernel_shape=[kernel_size, kernel_size],
        count_include_pad=count_include_pad,
        pads=[padding] * 4,
        strides=[strides, strides],
        debugContext=debugContext)
    return TTensor(x)

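# A minimal sketch (hypothetical helper): with pads=[padding] * 4 the pooling
# output size follows the usual ONNX arithmetic (no ceil mode).
def _demo_avg_pool_output_size():
    in_size, kernel, stride, pad = 224, 2, 2, 0
    # out = floor((in + 2 * pad - kernel) / stride) + 1
    assert (in_size + 2 * pad - kernel) // stride + 1 == 112
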
def topk(x, k, sorted=True, dim=-1, debugContext=""):
    """
    args:
        k: number of top elements to return
        dim: the dimension to sort and slice along
    """
    if k.shape.ndims == 0:
        k = k.unsqueeze(0)
    else:
        assert k.shape.ndims == 1
    values, order = get_builder().aiOnnx.topk(
        [x.getIpuIndex(), k.getIpuIndex()],
        axis=dim,
        sorted=sorted,
        debugContext=debugContext)
    return TTensor(values), TTensor(order)

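# A minimal sketch (hypothetical helper): topk returns the k largest values
# and their indices along dim, like this numpy recipe.
def _demo_topk_semantics():
    x = np.array([1.0, 4.0, 2.0])
    order = np.argsort(-x)[:2]  # indices of the 2 largest values
    assert order.tolist() == [1, 2] and x[order].tolist() == [4.0, 2.0]
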
def resize(x,
           roi=None,
           scales=None,
           sizes=None,
           coordinate_transformation_mode='half_pixel',
           cubic_coeff_a=-0.75,
           exclude_outside=0,
           extrapolation_value=0.0,
           mode='nearest',
           nearest_mode='round_prefer_floor',
           debugContext=''):
    # TODO Check whether each parameter is correct
    # x: N-D tensor
    # roi: 1-D tensor given as [start1, ..., startN, end1, ..., endN], where
    #     N is the rank of x; only takes effect when
    #     coordinate_transformation_mode is 'tf_crop_and_resize'
    # scales: tensor(float), the scale array along each dimension
    # sizes: tensor(int64), the size of the output tensor
    # Only one of 'scales' and 'sizes' can be specified.
    assert (scales is None) != (sizes is None)
    if roi is not None or coordinate_transformation_mode == 'tf_crop_and_resize':
        # explicit roi handling is not supported yet
        raise NotImplementedError
    roi = constant(
        np.array([0, -1] * x.shape.ndims).astype(mappin_gc2npy[x.dtype]))
    scales = constant(np.array([1.0] * x.shape.ndims).astype(
        np.float32)) if scales is None else scales
    sizes = constant(np.array([1] * x.shape.ndims).astype(
        np.int64)) if sizes is None else sizes
    inputs_list = [
        x.getIpuIndex(),
        roi.getIpuIndex(),
        scales.getIpuIndex(),
        sizes.getIpuIndex()
    ]
    inputs_dic = {
        'coordinate_transformation_mode': coordinate_transformation_mode,
        'cubic_coeff_a': cubic_coeff_a,
        'exclude_outside': exclude_outside,
        'extrapolation_value': extrapolation_value,
        'mode': mode,
        'nearest_mode': nearest_mode,
        'debugContext': debugContext
    }
    return TTensor(get_builder().aiOnnx.resize(inputs_list, **inputs_dic))

def gc_slice(x, axes, starts, ends, debugContext=""):
    if check_all_constant([x, axes, starts, ends]):
        # degrade to np op, slicing only the listed axes
        data = x.data
        slices = [slice(None)] * data.ndim
        for axis, start, end in zip(axes.data.tolist(), starts.data.tolist(),
                                    ends.data.tolist()):
            slices[axis] = slice(start, end)
        return constant(data[tuple(slices)])
    else:
        x = get_builder().aiOnnx.slice([
            x.getIpuIndex(),
            starts.getIpuIndex(),
            ends.getIpuIndex(),
            axes.getIpuIndex()
        ],
                                       debugContext=debugContext)
        return TTensor(x)

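# A minimal sketch (hypothetical helper): axes/starts/ends address only the
# listed axes; every other axis is taken whole, exactly what the
# constant-folding path above builds with slice(None).
def _demo_slice_semantics():
    x = np.arange(12).reshape(3, 4)
    slices = [slice(None)] * x.ndim
    slices[1] = slice(1, 3)  # axes=[1], starts=[1], ends=[3]
    assert x[tuple(slices)].tolist() == [[1, 2], [5, 6], [9, 10]]
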
def squeeze(x, dims, debugContext=""):
    if isinstance(dims, int):
        dims = [dims]
    if check_all_constant([x]):
        # degrade to np op; squeeze from the last dim backwards so the
        # earlier indices stay valid
        data = x.data
        current_dim = float('inf')
        for dim in reversed(dims):
            assert current_dim > dim  # dims must be in ascending order
            current_dim = dim
            data = data.squeeze(dim)
        return constant(data)
    for dim in dims:
        assert x.pureShape[dim] == 1
    x = get_builder().aiOnnx.squeeze([x.getIpuIndex()],
                                     axes=dims,
                                     debugContext=debugContext)
    return TTensor(x)