Example #1
    def done(self):
        # Assemble the buffered data into one ndarray with the expected dtype
        # and shape, then wrap it in a LoDTensor placed on self.place.
        arr = numpy.array(self.data, dtype=self.dtype).reshape(self.shape)
        t = core.LoDTensor()
        t.set(arr, self.place)
        if self.lod_level > 0:
            # Attach the offset-based LoD describing the sequence boundaries.
            t.set_lod(self.lod)
        return t
Example #2
    def done(self):
        # Same conversion as in Example #1, except that the reshape is optional
        # and the LoD is attached as length-based recursive sequence lengths.
        arr = numpy.array(self.data, dtype=self.dtype)
        if self.shape:
            arr = arr.reshape(self.shape)
        t = core.LoDTensor()
        t.set(arr, self.place)
        if self.lod_level > 0:
            t.set_recursive_sequence_lengths(self.lod)
        return t
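
The only substantive difference between Examples #1 and #2 is how the LoD is attached: set_lod expects offset-based boundaries, while set_recursive_sequence_lengths expects per-sequence lengths. Below is a minimal sketch (not part of the examples above) contrasting the two calls, assuming a Paddle Fluid installation where paddle.fluid.core is importable and using made-up data:

# Minimal sketch contrasting offset-based and length-based LoD.
# Assumes paddle.fluid.core is importable; the data values are made up.
import numpy
import paddle.fluid.core as core

arr = numpy.arange(5, dtype="int64").reshape([5, 1])

t1 = core.LoDTensor()
t1.set(arr, core.CPUPlace())
t1.set_lod([[0, 2, 5]])                      # offsets: sequences [0:2] and [2:5]

t2 = core.LoDTensor()
t2.set(arr, core.CPUPlace())
t2.set_recursive_sequence_lengths([[2, 3]])  # lengths: 2 words, then 3 words

print(t1.lod())                              # [[0, 2, 5]]
print(t2.recursive_sequence_lengths())       # [[2, 3]]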
Example #3
def create_lod_tensor(data, lod, place):
    """Create a lod tensor from a numpy array, a list, or an existing lod tensor.

    Create a lod tensor by doing the following:
    1. Check that the length-based input lod is valid.
    2. Convert the length-based lod to an offset-based LoD.
    3. Copy the data from a numpy array, a list or an existing lod tensor to
       the CPU or GPU device (based on the input place).
    4. Set the level of detail (LoD) using the offset-based LoD.
    
    Usage example:
    Suppose we want a LoDTensor to hold data for sequences of words, where each
    word is represented by an integer, and we want to create a LoDTensor to
    represent two sentences, one of 2 words and one of 3 words.

    Then 'data' can be a numpy array of integers with shape (5, 1).
    'lod' will be [[2, 3]], indicating the length (number of words) of each sentence.
    This length-based input lod [[2, 3]] will be converted to the offset-based lod
    [[0, 2, 5]] inside the function call.

    Please refer to 
    github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/lod_tensor.md
    for more details regarding LoD.

    Args:
        data: a numpy array or a LoDTensor or a list holding the data to be copied.
        lod: a list of lists indicating the length-based LoD info specified by the user. 
        place: CPU or GPU place indicating where the data in the new LoDTensor will be stored.

    Returns:
        A fluid LoDTensor object with tensor data and lod info.
    """
    if isinstance(data, core.LoDTensor):
        return create_lod_tensor(np.array(data), lod, place)
    elif isinstance(data, list):
        # When the input data is a list, only the case where the base element is
        # an index of shape [1] and dtype int64 (e.g., a word id) is handled. Hence,
        # the generated LoDTensor will be of shape [n, 1] and dtype int64, where `n`
        # is the total number of words or other indices in the sequences.
        new_lod = []
        for seq in data:
            new_lod.append(len(seq))
        assert [new_lod] == lod, "data and lod do not match"
        flattened_data = np.concatenate(data, axis=0).astype("int64")
        flattened_data = flattened_data.reshape([len(flattened_data), 1])
        return create_lod_tensor(flattened_data, lod, place)
    elif isinstance(data, np.ndarray):
        assert _validate_lod(lod,
                             data.shape[0]), "the provided lod info is invalid"
        tensor = core.LoDTensor()
        tensor.set(data, place)
        tensor.set_lod(_convert_lod(lod))
        return tensor
    else:
        raise TypeError(
            "data should be either a LoDTensor, a Numpy array or a list")
Example #4
    def run(self, fetch_list, feed=None, feed_dict=None):
        """
        Run a parallel executor with fetch_list.

        The feed parameter can be a dict or a list. If feed is a dict, the
        feed data will be split across the devices. If feed is a list, we
        assume the data has already been split among the devices, and each
        element in the list will be copied to the corresponding device directly.

        For example, if the feed is a dict:
        >>> exe = ParallelExecutor()
        >>> # the image will be split across devices. If there are two devices,
        >>> # each device will process an image with shape (24, 1, 28, 28)
        >>> exe.run(feed={'image': numpy.random.random(size=(48, 1, 28, 28))})

        For example, if the feed is a list:
        >>> exe = ParallelExecutor()
        >>> # each device will process the corresponding element in the list.
        >>> # the 1st device will process an image with shape (48, 1, 28, 28)
        >>> # the 2nd device will process an image with shape (32, 1, 28, 28)
        >>> #
        >>> # you can use exe.device_count to get the device number.
        >>> exe.run(feed=[{"image": numpy.random.random(size=(48, 1, 28, 28))},
        >>>               {"image": numpy.random.random(size=(32, 1, 28, 28))},
        >>>              ])


        Args:
            fetch_list(list): The fetched variable names
            feed(list|dict|None): The feed variables. If the feed is a dict,
                the tensors in that dict will be split across the devices. If
                the feed is a list, each element of the list will be copied
                to the corresponding device.
            feed_dict: Alias for feed parameter, for backward compatibility.
                This parameter is deprecated.

        Returns: fetched result list.

        """
        if feed is None and feed_dict is not None:
            feed = feed_dict
            sys.stderr.write("`feed_dict` is deprecated. Please use `feed=`\n")

        if isinstance(feed, dict):
            feed_tensor_dict = dict()
            for feed_name in feed:
                feed_tensor = feed[feed_name]
                if not isinstance(feed_tensor, core.LoDTensor):
                    feed_tensor = core.LoDTensor()
                    # Always set to CPU place, since the tensor needs to be split
                    # and splitting is fast on CPU.
                    feed_tensor.set(feed[feed_name], core.CPUPlace())
                feed_tensor_dict[feed_name] = feed_tensor

            self.executor.feed_and_split_tensor_into_local_scopes(
                feed_tensor_dict)
        elif isinstance(feed, (list, tuple)):
            if len(feed) != len(self._act_places):
                raise ValueError(
                    "When feeding a list of tensors, the list must have the "
                    "same length as the number of places")

            res = list()

            for i, each in enumerate(feed):
                if not isinstance(each, dict):
                    raise TypeError(
                        "Each element of feed list should be a dict")
                res_dict = dict()
                for feed_name in each:
                    tensor = each[feed_name]
                    if not isinstance(tensor, core.LoDTensor):
                        tmp = core.LoDTensor()
                        tmp.set(tensor, self._act_places[i])
                        tensor = tmp
                    res_dict[feed_name] = tensor
                res.append(res_dict)
            self.executor.feed_tensors_into_local_scopes(res)

        fetch_var_name = '@FETCHED_VAR_NAME@'
        self.executor.run(fetch_list, fetch_var_name)
        arr = self.scope.find_var(fetch_var_name).get_lod_tensor_array()
        return [arr[i] for i in range(len(arr))]
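
To make the dict/list distinction in the docstring concrete, here is a hedged driver sketch. The program, the loss variable, and the input name 'image' are placeholders for a model definition that is not shown, and the constructor arguments reflect the ParallelExecutor API of the same Fluid era as the code above:

# Hypothetical driver code: `main_program`, `loss`, and the input name 'image'
# come from a model definition that is not shown here.
import numpy
import paddle.fluid as fluid

exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name,
                             main_program=main_program)

# dict feed: one large batch, split across all devices by the executor
whole_batch = {'image': numpy.random.random((48, 1, 28, 28)).astype('float32')}
loss_v, = exe.run(fetch_list=[loss.name], feed=whole_batch)

# list feed: one dict per device, copied to that device as-is
per_device = [{'image': numpy.random.random((24, 1, 28, 28)).astype('float32')}
              for _ in range(exe.device_count)]
loss_v, = exe.run(fetch_list=[loss.name], feed=per_device)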
Example #5
def create_lod_tensor(data, recursive_seq_lens, place):
    """
    Create a lod tensor from a numpy array, a list, or an existing lod tensor.

    Create a lod tensor by doing the following:

    1. Check that the length-based level of detail (LoD), also known as
       recursive_sequence_lengths, of the input is valid.

    2. Convert recursive_sequence_lengths to an offset-based LoD.

    3. Copy the data from a numpy array, a list or an existing lod tensor to
       the CPU or GPU device (based on the input place).

    4. Set the level of detail (LoD) using the offset-based LoD.
    
    Examples:

        Suppose we want a LoDTensor to hold data for sequences of words, where
        each word is represented by an integer, and we want to create a LoDTensor
        to represent two sentences, one of 2 words and one of 3 words.

        Then :code:`data` can be a numpy array of integers with shape (5, 1).
        :code:`recursive_seq_lens` will be [[2, 3]], indicating the length (number
        of words) of each sentence. This length-based :code:`recursive_seq_lens`
        [[2, 3]] will be converted to the offset-based LoD [[0, 2, 5]] inside the
        function call.

    Please reference :ref:`api_guide_low_level_lod_tensor` for more details
    regarding LoD.

    Args:
        data(numpy.ndarray|list|LoDTensor): a numpy array or a LoDTensor or a
            list holding the data to be copied.
        recursive_seq_lens(list): a list of lists indicating the length-based level of detail 
            info specified by the user.
        place(Place): CPU or GPU place indicating where the data in the new
            LoDTensor will be stored.

    Returns:
        A fluid LoDTensor object with tensor data and recursive_seq_lens info.
    """
    if isinstance(data, core.LoDTensor):
        return create_lod_tensor(np.array(data), recursive_seq_lens, place)
    elif isinstance(data, list):
        # When the input data is a list, only the case where the base element is
        # an index of shape [1] and dtype int64 (e.g., a word id) is handled. Hence,
        # the generated LoDTensor will be of shape [n, 1] and dtype int64, where `n`
        # is the total number of words or other indices in the sequences.
        new_recursive_seq_lens = []
        for seq in data:
            new_recursive_seq_lens.append(len(seq))
        assert [
            new_recursive_seq_lens
        ] == recursive_seq_lens, "data and recursive_seq_lens do not match"
        flattened_data = np.concatenate(data, axis=0).astype("int64")
        flattened_data = flattened_data.reshape([len(flattened_data), 1])
        return create_lod_tensor(flattened_data, recursive_seq_lens, place)
    elif isinstance(data, np.ndarray):
        tensor = core.LoDTensor()
        tensor.set(data, place)
        tensor.set_recursive_sequence_lengths(recursive_seq_lens)
        assert tensor.has_valid_recursive_sequence_lengths(
        ), "the provided lod info is invalid"
        return tensor
    else:
        raise TypeError(
            "data should be either a LoDTensor, a Numpy array or a list")