Code example #1
File: utils.py  Project: zxr8192/singa
import numpy as np
from singa import tensor


def handle_odd_pad_fwd(x, odd_padding):
    """
    handle odd padding mode forward
    Args:x
        the input tensor
    Args:odd_padding
        the odd_padding
    Returns: 
        tensor, the output
    """
    x_tensor = tensor.from_raw_tensor(x)
    # (axis, left padding if True else right padding)
    flags = [(2, True), (2, False), (3, True), (3, False)]
    for (axis, left), pad in zip(flags, odd_padding):
        if pad == 0:
            continue
        zeros_shape = list(x_tensor.data.shape())
        zeros_shape[axis] = pad
        zero_padding = np.zeros(zeros_shape).astype(np.float32)
        zero_padding = tensor.Tensor(device=x.device(), data=zero_padding)
        if left:
            x_tensor = tensor.concatenate((zero_padding, x_tensor), axis)
        else:
            x_tensor = tensor.concatenate((x_tensor, zero_padding), axis)
    return x_tensor.data
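
The helper above inserts extra zero rows or columns on individual sides of the spatial axes (axes 2 and 3 of an NCHW tensor), one entry of odd_padding per side. The same logic can be sketched in plain NumPy to see the shapes it produces; odd_pad_numpy is a hypothetical name used only for illustration and is not part of the SINGA API:

import numpy as np

def odd_pad_numpy(x, odd_padding):
    # odd_padding entries follow the same order as the flags list above:
    # (axis 2 left, axis 2 right, axis 3 left, axis 3 right).
    flags = [(2, True), (2, False), (3, True), (3, False)]
    for (axis, left), pad in zip(flags, odd_padding):
        if pad == 0:
            continue
        zeros_shape = list(x.shape)
        zeros_shape[axis] = pad
        zeros = np.zeros(zeros_shape, dtype=np.float32)
        x = np.concatenate((zeros, x), axis) if left else np.concatenate((x, zeros), axis)
    return x

# e.g. one extra row on top of a (1, 1, 3, 3) input:
# odd_pad_numpy(np.ones((1, 1, 3, 3), np.float32), (1, 0, 0, 0)).shape == (1, 1, 4, 3)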
Code example #2
File: test_tensor.py  Project: zxr8192/singa
    def _concatenate_helper(self, dev):
        np1 = np.random.random([5, 6, 7, 8]).astype(np.float32)
        np2 = np.random.random([5, 6, 7, 1]).astype(np.float32)
        np3 = np.concatenate((np1, np2), axis=3)

        t1 = tensor.Tensor(device=dev, data=np1)
        t2 = tensor.Tensor(device=dev, data=np2)

        t3 = tensor.concatenate((t1, t2), 3)

        np.testing.assert_array_almost_equal(tensor.to_numpy(t3), np3)
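
The test above relies on the shape rule of np.concatenate, which tensor.concatenate mirrors: every dimension except the concatenation axis must match. A short NumPy-only illustration of that rule (not SINGA code):

import numpy as np

a = np.zeros((5, 6, 7, 8), np.float32)
b = np.zeros((5, 6, 7, 1), np.float32)       # differs from a only on axis 3
print(np.concatenate((a, b), axis=3).shape)  # (5, 6, 7, 9)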
Code example #3
    def predict(self, queries: List[str]):
        print("Get queries")

        res = []
        queries = [self._preprocess(ele) for ele in queries]

        for input_ids in queries:
            x = tensor.Tensor(device=self.dev, data=input_ids)
            out = []
            for i in range(self.length):
                y = self._model.forward(x)
                y = autograd.reshape(y, y.shape[-2:])[-1, :]  # logits of the last position
                y = tensor.softmax(y)
                y = tensor.to_numpy(y)[0]
                y = np.argsort(y)[-1]  # greedy pick: the highest-probability token id
                out.append(y)
                y = np.array([y]).reshape([1, 1, -1]).astype(np.float32)
                y = tensor.Tensor(device=self.dev, data=y)
                x = tensor.concatenate([x, y], 2)  # append the new token along the sequence axis
            result = self._postprocess(out)
            res.append(result)
        return res
Code example #4
    logging.info("model compling...")
    dev = device.get_default_device()
    x = tensor.Tensor(device=dev, data=input_ids)
    model = MyModel(onnx_model)

    # verify the test
    # from utils import load_dataset
    # sg_ir = sonnx.prepare(onnx_model) # run without graph
    # inputs, ref_outputs = load_dataset(
    #     os.path.join('/tmp', 'GPT-2-LM-HEAD', 'test_data_set_0'))
    # outputs = sg_ir.run(inputs)
    # for ref_o, o in zip(ref_outputs, outputs):
    #     np.testing.assert_almost_equal(ref_o, o, 4)

    logging.info("model running...")
    output = []

    for i in range(length):
        logging.info("word {} generating...".format(i))
        y = model.forward(x)
        y = autograd.reshape(y, y.shape[-2:])[-1, :]
        y = tensor.softmax(y)
        y = tensor.to_numpy(y)[0]
        y = np.argsort(y)[-1]
        output.append(y)
        y = np.array([y]).reshape([1, 1, -1]).astype(np.float32)
        y = tensor.Tensor(device=dev, data=y)
        x = tensor.concatenate([x, y], 2)

    text = postprocess(output)
    print(text)
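
Examples #3 and #4 run the same greedy decoding loop: feed the current sequence through the model, take the highest-probability token at the last position, append it to the output, and concatenate it onto the input along the sequence axis (axis 2). A minimal NumPy sketch of that loop, with forward as a stand-in for model.forward (hypothetical, for illustration only):

import numpy as np

def greedy_decode(forward, input_ids, length):
    # forward: callable mapping a (1, 1, seq_len) float32 array to
    # logits of shape (seq_len, vocab_size); stands in for model.forward.
    x = np.asarray(input_ids, np.float32).reshape(1, 1, -1)
    out = []
    for _ in range(length):
        logits = forward(x)
        token = int(np.argmax(logits[-1, :]))  # greedy pick, same as np.argsort(...)[-1]
        out.append(token)
        nxt = np.array([token], np.float32).reshape(1, 1, -1)
        x = np.concatenate([x, nxt], axis=2)   # grow the sequence, like tensor.concatenate
    return out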