Example #1
def _conv2d(input,
            filter,
            bias=False,
            strides=[1, 1],
            pads=[1, 1, 1, 1],
            dilations=[1, 1],
            group=1,
            debugContext=''):
    """Encapsulation of function get_builder().aiOnnx.conv!

    args:
        x:      input tensor
        ksize:  int,kernel size
        stride: int,stride of conv
        pads:   int, conv padding
        c_out:  int, output channel
        group:  int, conv group nums,default:1
    """
    args = [input.getIpuIndex(), filter.getIpuIndex()]
    if bias:
        args.append(bias.getIpuIndex())
    output = get_builder().aiOnnx.conv(args,
                                       strides=strides,
                                       pads=pads,
                                       dilations=dilations,
                                       group=group,
                                       debugContext=debugContext)
    if get_memory_proportion() is not None:
        get_builder().setAvailableMemoryProportion(output,
                                                   get_memory_proportion())
    return TTensor(output)
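A minimal usage sketch for the wrapper above (hypothetical shapes and names; assumes addInitializedInputTensor from Example #22 and a TTensor x holding an NCHW input are in scope):

# Hypothetical usage: 3x3 conv, 16 -> 32 channels.
w = addInitializedInputTensor(
    np.random.randn(32, 16, 3, 3).astype(np.float32), debugContext='conv/w')
b = addInitializedInputTensor(np.zeros(32, dtype=np.float32),
                              debugContext='conv/b')
y = _conv2d(x, w, bias=b, strides=[1, 1], pads=[1, 1, 1, 1])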
Example #2
def clip(x, minimum=-np.inf, maximum=np.inf, debugContext=""):
    if get_ai_onnx_version() >= 11:
        # opset >= 11: min/max are provided as input tensors
        minimum = constant(np.asarray(minimum).astype(np.float32))
        maximum = constant(np.asarray(maximum).astype(np.float32))
        return TTensor(get_builder().aiOnnx.clip(
            [x.getIpuIndex(),
             minimum.getIpuIndex(),
             maximum.getIpuIndex()],
            debugContext=debugContext))
    else:
        # opset < 11: min/max are float attributes, not tensors
        return TTensor(get_builder().aiOnnx.clip([x.getIpuIndex()],
                                                 max=float(maximum),
                                                 min=float(minimum),
                                                 debugContext=debugContext))
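A usage sketch (hypothetical TTensor x; the wrapper picks the tensor-input or attribute form of Clip depending on the opset):

# Clamp activations to [0, 6], ReLU6-style.
y = clip(x, minimum=0.0, maximum=6.0, debugContext='relu6')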
Example #3
def _batchNorm(
    x,
    scale,
    biases,
    mean,
    var,
    num_outputs=1,
    momentum=0.9,
    epsilon=1e-5,
    debugContext="",
):
    results = get_builder().aiOnnx.batchnormalization(
        [
            x.getIpuIndex(),
            scale.getIpuIndex(),
            biases.getIpuIndex(),
            mean.getIpuIndex(),
            var.getIpuIndex()
        ],
        num_outputs=num_outputs,
        epsilon=epsilon,
        momentum=momentum,
        debugContext=debugContext)
    results = results[0] if num_outputs == 1 else results
    if isinstance(results, list):
        results = [TTensor(r) for r in results]
    else:
        results = [TTensor(results)]
    return results
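A usage sketch for inference-mode batch norm (hypothetical channel count; parameter tensors created with addInitializedInputTensor from Example #22; note the wrapper always returns a list):

c = 64
scale = addInitializedInputTensor(np.ones(c, dtype=np.float32))
bias = addInitializedInputTensor(np.zeros(c, dtype=np.float32))
mean = addInitializedInputTensor(np.zeros(c, dtype=np.float32))
var = addInitializedInputTensor(np.ones(c, dtype=np.float32))
y, = _batchNorm(x, scale, bias, mean, var, num_outputs=1)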
Example #4
def softmax_2d(x, axis=1, debugContext=""):
    assert axis in [-1, 1]
    assert x.shape.ndims == 2
    x = get_builder().aiOnnx.softmax(
        [x.getIpuIndex()], axis=axis,
        debugContext=debugContext)
    return TTensor(x)
Example #5
def reshape(source, target_shape, debugContext=""):
    """
    args:
        source:       TTensor to reshape
        target_shape: target shape as a list of int (e.g. [3, 4, 5, 6]),
                      an np.ndarray, or a TTensor
    """
    if isinstance(target_shape, TTensor):
        target_shape = target_shape.data
    if isinstance(target_shape, np.ndarray):
        target_shape = target_shape.tolist()
    if isinstance(target_shape, list):
        target_shape = [scalarTensor2int(ele) for ele in target_shape]

    target_shape = constant(np.array(target_shape).astype(np.int64),
                            debugContext=debugContext)

    if check_all_constant([source, target_shape]):
        # degrade to np op
        result = source.data.reshape(target_shape.data)
        result = constant(result)
        return result
    else:
        return TTensor(get_builder().aiOnnx.reshape(
            [source.getIpuIndex(),
             target_shape.getIpuIndex()],
            debugContext=debugContext))
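A sketch of the constant-folding path versus the graph path (hypothetical values; assumes constant builds a compile-time TTensor as elsewhere in this module):

a = constant(np.arange(12).astype(np.float32))
b = reshape(a, [3, 4])                         # all-constant: folded with numpy
y = reshape(x, [-1, 4], debugContext='rows')   # otherwise: emits aiOnnx.reshape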
Example #6
def one_hot(indices, depth, values=None, debugContext=''):
    '''
        values: [off_value, on_value]
        if an index is -1, the corresponding one-hot row is [0] * depth
    '''
    if isinstance(depth, int):
        depth = to_tensor(depth, dtype='INT64')
    if values is None:
        values = constant(np.asarray([0, 1]).astype(np.int32),
                          debugContext=debugContext)
    assert indices.dtype in [
        'INT32', 'INT64', 'INT16', 'UINT32', 'UINT64', 'UINT16'
    ]
    assert depth.dtype in [
        'INT32', 'INT64', 'INT16', 'UINT32', 'UINT64', 'UINT16'
    ]
    assert values.dtype in [
        'INT32', 'INT64', 'INT16', 'UINT32', 'UINT64', 'UINT16'
    ]
    result = get_builder().aiOnnx.onehot(
        [indices.getIpuIndex(),
         depth.getIpuIndex(),
         values.getIpuIndex()],
        debugContext=debugContext)
    result = TTensor(result)
    result_shape = list(result.pureShape)
    if result_shape[1] == 0:
        result_shape[1] = depth
    result = result.reshape(result_shape)
    return result
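A usage sketch (hypothetical labels; assumes constant infers dtype 'INT32' from the numpy dtype, as required by the assertions above):

labels = constant(np.array([2, 0, -1], dtype=np.int32))
onehot = one_hot(labels, depth=10)   # the row for label -1 is all zeros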
Example #7
def tile(input, repeats, debugContext=""):
    if check_all_constant([input, repeats]):
        result = np.tile(input.data, repeats.data)
        return constant(result)
    result = get_builder().aiOnnx.tile(
        [input.getIpuIndex(), repeats.getIpuIndex()],
        debugContext=debugContext)
    return TTensor(result)
Example #8
def real_init(self):
    assert not self.initialized
    self.data = np.ascontiguousarray(self.data.copy())
    name = get_builder().aiOnnx.constant(self.data)
    super().__init__(name)
    self.__name = name  # private attribute cannot be inherited
    self.initialized = True
Example #9
def flatten(x):
    '''Equivalent of np.ndarray.flatten: returns a 1-D tensor.
    '''
    if check_all_constant([x]):
        # degrade to np op
        x = x.data.flatten()
        return constant(x)
    # flatten with axis=0 yields shape (1, N); squeeze away the leading 1
    x = get_builder().aiOnnx.flatten([x.getIpuIndex()], 0)
    return TTensor(x).squeeze(0)
Example #10
def min(tensor_list, debugContext=""):
    if check_all_constant(tensor_list):
        # degrade to np op: elementwise minimum across inputs, matching
        # ONNX Min semantics (np.min would collapse to a global scalar)
        arr_list = [t.data for t in tensor_list]
        result = arr_list[0]
        for arr in arr_list[1:]:
            result = np.minimum(result, arr)
        return constant(result)
    return TTensor(get_builder().aiOnnx.min(
        [t.getIpuIndex() for t in tensor_list], debugContext=debugContext))
Example #11
def reduceMean(x, axes, keepdims=False, debugContext=''):
    if isinstance(axes, int):
        axes = [axes]
    assert isinstance(axes, list) or axes is None
    return TTensor(get_builder().aiOnnx.reducemean([x.getIpuIndex()],
                                                   axes=axes,
                                                   keepdims=keepdims,
                                                   debugContext=debugContext))
Example #12
def sub(tensors, debugContext=""):
    assert len(tensors) == 2
    if check_all_constant(tensors):
        # degrade to np op
        result = tensors[0].data - tensors[1].data
        return constant(result, debugContext=debugContext)
    return TTensor(get_builder().aiOnnx.sub([t.getIpuIndex() for t in tensors],
                                            debugContext=debugContext))
Example #13
def add(tensors, debugContext=""):
    if check_all_constant(tensors):
        # degrade to np op
        result = 0
        for t in tensors:
            result = result + t.data
        return constant(result, debugContext=debugContext)
    return TTensor(get_builder().aiOnnx.add([t.getIpuIndex() for t in tensors],
                                            debugContext=debugContext))
Example #14
def transpose(x, dim_order, debugContext=""):
    """dim_order: list of int. eg:[0,2,3,1]"""
    if check_all_constant([x]):
        # degrade to np op
        result = np.transpose(x.data, dim_order)
        return constant(result)
    return TTensor(get_builder().aiOnnx.transpose([x.getIpuIndex()],
                                                  dim_order,
                                                  debugContext=debugContext))
Example #15
def mul(tensors, debugContext=""):
    if check_all_constant(tensors):
        # degrade to np op
        result = 1
        for t in tensors:
            result = t.data * result
        return constant(result, debugContext=debugContext)
    return TTensor(get_builder().aiOnnx.mul([t.getIpuIndex() for t in tensors],
                                            debugContext=debugContext))
Example #16
def matmul(x, y, debugContext=""):
    if check_all_constant([x, y]):
        # degrade to np op
        result = np.matmul(x.data, y.data)
        return constant(result)
    else:
        assert x.dtype in ['FLOAT', "FLOAT16"]
        assert y.dtype in ['FLOAT', "FLOAT16"]
        return TTensor(get_builder().aiOnnx.matmul(
            [x.getIpuIndex(), y.getIpuIndex()], debugContext=debugContext))
Example #17
def _concat(tensor_list, dim, debugContext=""):
    if check_all_constant(tensor_list):
        # degrade to np op
        np_arr_list = [t.data for t in tensor_list]
        result = np.concatenate(np_arr_list, axis=dim)
        return constant(result)
    return TTensor(get_builder().aiOnnx.concat(
        [tensor.getIpuIndex() for tensor in tensor_list],
        dim,
        debugContext=debugContext))
Example #18
def pad(data, pads, mode='constant', constant_value=0, debugContext=''):
    constant_value = constant(constant_value).cast(data.dtype.upper())
    pads = to_tensor(pads).cast('INT64').flatten()
    result = get_builder().aiOnnx.pad(
        [data.getIpuIndex(),
         pads.getIpuIndex(),
         constant_value.getIpuIndex()],
        mode=mode,
        debugContext=debugContext)
    return TTensor(result)
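A usage sketch (hypothetical NCHW tensor x; ONNX Pad expects all the 'begin' values first, then all the 'end' values):

# Pad H and W by 1 on each side:
# [begin_n, begin_c, begin_h, begin_w, end_n, end_c, end_h, end_w]
y = pad(x, pads=[0, 0, 1, 1, 0, 0, 1, 1], mode='constant', constant_value=0)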
Example #19
def reduceprod(x, dim, keepdims=False, debugContext=""):
    """
    args:
        dim: int, the axis along which to take the product
    """
    x = get_builder().aiOnnx.reduceprod([x.getIpuIndex()],
                                        axes=[dim],
                                        keepdims=keepdims,
                                        debugContext=debugContext)
    return TTensor(x)
Example #20
def relu(x, debugContext=""):
    """
    args:
        x:      input tensor
    """
    if isinstance(x, list):
        x = [ele.getIpuIndex() for ele in x]
    else:
        x = [x.getIpuIndex()]
    x = get_builder().aiOnnx.relu(x, debugContext=debugContext)
    return TTensor(x)
Example #21
def split(x, lenOfSplit, dim, debugContext=""):
    """
    args:
        lenOfSplit: e.g. (4, 1) splits x into two pieces along dim,
                    one of length 4 and the other of length 1
    """
    # aiOnnx.split returns one tensor id per piece
    results = get_builder().aiOnnx.split([x.getIpuIndex()],
                                         len(lenOfSplit),
                                         dim,
                                         lenOfSplit,
                                         debugContext=debugContext)
    return [TTensor(r) for r in results]
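A usage sketch (hypothetical tensor with 5 rows along dim 0; with the list return above, the pieces unpack directly):

head, tail = split(x, lenOfSplit=(4, 1), dim=0)   # shapes (4, ...) and (1, ...)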
Example #22
def addInitializedInputTensor(array, debugContext=""):
    """
    args:
        array: a numpy array that will be copied to the IPU as an
               initialized input tensor

    return:
        TTensor wrapping the new tensor's name
    """

    name = get_builder().addInitializedInputTensor(array,
                                                   debugContext=debugContext)
    return TTensor(name)
Example #23
def nllloss(prob,
            label,
            reductionType=popart.ReductionType.Mean,
            debugPrefix=''):
    # prob: scaled probabilities, [batch, classes], float
    # label: labels, [batch,], int32
    with name_scope(debugPrefix):
        loss = get_builder().aiGraphcore.nllloss(
            [prob.getIpuIndex(), label.getIpuIndex()],
            reductionType,
            debugContext=debugPrefix)
    return TTensor(loss)
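A sketch combining softmax_2d (Example #4) with the loss above (hypothetical logits/labels tensors; assumes popart is imported at module level):

probs = softmax_2d(logits, axis=1)          # [batch, classes]
loss = nllloss(probs, labels,
               reductionType=popart.ReductionType.Mean,
               debugPrefix='loss')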
Example #24
def cast(x, target_type='FLOAT', debugContext=''):
    """
    target_type:
        FLOAT|FLOAT16|INT8|INT16|INT32|UINT8|UINT16|UINT32|BOOL
    """
    target_type = 'FLOAT' if target_type == 'FLOAT32' else target_type
    if check_all_constant([x]):
        # degrade to np op
        data = x.data.astype(mappin_gc2npy[target_type])
        return constant(data)
    else:
        return TTensor(get_builder().aiOnnx.cast([x.getIpuIndex()],
                                                 target_type.upper(),
                                                 debugContext))
Example #25
def unsqueeze(x, dims, debugContext=""):
    """
    args:
        dims: list of int, the axes at which to insert new size-1
              dimensions, e.g. [3] or [1, 3]
    """
    if check_all_constant([x]):
        # degrade to np op
        result = np.expand_dims(x.data, axis=dims)
        return constant(result)
    x = get_builder().aiOnnx.unsqueeze([x.getIpuIndex()],
                                       axes=dims,
                                       debugContext=debugContext)
    return TTensor(x)
Example #26
def avgPooling(x,
               strides=2,
               kernel_size=2,
               padding=0,
               count_include_pad=0,
               debugContext=""):

    x = get_builder().aiOnnx.averagepool(
        [x.getIpuIndex()],
        kernel_shape=[kernel_size, kernel_size],
        count_include_pad=count_include_pad,
        pads=[padding] * 4,
        strides=[strides, strides],
        debugContext=debugContext)
    return TTensor(x)
Example #27
def topk(x, k, sorted=True, dim=-1, debugContext=""):
    """
    args:
        k:      number of largest elements to return (scalar or 1-D TTensor)
        dim:    axis along which to take the top-k
    """
    if k.shape.ndims == 0:
        k = k.unsqueeze(0)
    else:
        assert k.shape.ndims == 1
    values, order = get_builder().aiOnnx.topk(
        [x.getIpuIndex(), k.getIpuIndex()],
        axis=dim,
        sorted=sorted,
        debugContext=debugContext)
    return TTensor(values), TTensor(order)
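A usage sketch (hypothetical scores tensor; k lifted to an INT64 TTensor with to_tensor as in Example #6, then unsqueezed by the wrapper):

k = to_tensor(5, dtype='INT64')
values, indices = topk(scores, k, sorted=True, dim=-1)   # top-5 along last axis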
Example #28
def resize(x,
           roi=None,
           scales=None,
           sizes=None,
           coordinate_transformation_mode='half_pixel',
           cubic_coeff_a=-0.75,
           exclude_outside=0,
           extrapolation_value=0.0,
           mode='nearest',
           nearest_mode='round_prefer_floor',
           debugContext=''):
    # TODO Check whether each parameter is correct
    # x:N-D tensor
    # roi: 1-D tensor given as [start1, ..., startN, end1, ..., endN], where N is the rank of X
    # scales: tensor(float),The scale array along each dimension.
    # sizes: tensor(int64),The size of the output tensor.
    # Only one of 'scales' and 'sizes' can be specified.
    # exactly one of 'scales' and 'sizes' may be given
    assert (scales is None) != (sizes is None)
    if roi is not None:
        # roi only takes effect when
        # coordinate_transformation_mode == 'tf_crop_and_resize'
        assert coordinate_transformation_mode == 'tf_crop_and_resize'
    roi = constant(
        np.array([0, -1] * x.shape.ndims).astype(
            mappin_gc2npy[x.dtype])) if roi is None else roi
    scales = constant(np.array(
        [1.0] * x.shape.ndims).astype(np.float32)) if scales is None else scales
    sizes = constant(np.array(
        [1] * x.shape.ndims).astype(np.int64)) if sizes is None else sizes
    inputs_list = [
        x.getIpuIndex(),
        roi.getIpuIndex(),
        scales.getIpuIndex(),
        sizes.getIpuIndex()
    ]
    inputs_dic = {
        'coordinate_transformation_mode': coordinate_transformation_mode,
        'cubic_coeff_a': cubic_coeff_a,
        'exclude_outside': exclude_outside,
        'extrapolation_value': extrapolation_value,
        'mode': mode,
        'nearest_mode': nearest_mode,
        'debugContext': debugContext
    }
    result = TTensor(get_builder().aiOnnx.resize(inputs_list, **inputs_dic))
    return result
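A usage sketch (hypothetical NCHW tensor x; 2x nearest-neighbour upsampling driven by 'scales'):

scales = constant(np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32))
y = resize(x, scales=scales, mode='nearest',
           coordinate_transformation_mode='asymmetric')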
Example #29
def gc_slice(x, axes, starts, ends, debugContext=""):
    if check_all_constant([x, axes, starts, ends]):
        # degrade to np op
        x = x.data
        x_slices = []
        for start, end in zip(starts.data.tolist(), ends.data.tolist()):
            x_slices.append(slice(start, end))
        # index with a tuple of slices; numpy no longer accepts a list here
        return constant(x[tuple(x_slices)])
    else:
        x = get_builder().aiOnnx.slice([
            x.getIpuIndex(),
            starts.getIpuIndex(),
            ends.getIpuIndex(),
            axes.getIpuIndex()
        ],
            debugContext=debugContext)
    return TTensor(x)
Example #30
def squeeze(x, dims, debugContext=""):
    if isinstance(dims, int):
        dims = [dims]
    if check_all_constant([x]):
        # degrade to np op: remove axes from highest to lowest so earlier
        # removals do not shift the remaining indices (dims must be ascending)
        x = x.data
        current_dim = float('inf')
        for dim in reversed(dims):
            assert current_dim > dim
            current_dim = dim
            x = x.squeeze(dim)
        return constant(x)
    for dim in dims:
        assert x.pureShape[dim] == 1
    x = get_builder().aiOnnx.squeeze([x.getIpuIndex()],
                                     axes=dims,
                                     debugContext=debugContext)
    return TTensor(x)