Example #1
    def forward(self, arguments, outputs, keep_for_backward=None, device=None):
        '''
        Computes the values of specified variables in ``outputs``, using values
        provided in ``arguments`` that correspond to each input `Variable` of
        the function (i.e. those whose ``is_input`` is `True`).

        Example:
            >>> v = C.input_variable(shape=(3,))
            >>> f = C.reciprocal(v)
            >>> _, fv = f.forward({v:[[1, 2, 4]]}, [f.output])
            >>> list(fv.values())[0]
            array([[[ 1.  ,  0.5 ,  0.25]]], dtype=float32)

        Args:
            arguments: maps variables to their
             input data. The interpretation depends on the input type:

               * dict: keys are input variables or names, and values are the input data.
               * any other type: if the node has a unique input, ``arguments`` is mapped to this input.
                For nodes with more than one input, only dict is allowed.
             In both cases, every sample in the data will be interpreted
             as a new sequence. To mark samples as continuations of the
             previous sequence, specify ``arguments`` as ``tuple``: the
             first element will be used as ``arguments``, and the second one will
             be used as a list of bools, denoting whether a sequence is a new
             one (`True`) or a continuation of the previous one (`False`).
             Data should be either NumPy arrays or a
             :class:`~cntk.io.MinibatchData` instance.
            outputs (iterable): outputs to fetch values for.
            keep_for_backward (set, default `None`): the subset of the
             Function's output variables for which gradients shall be calculated
             in a subsequent backward call. If `None`, the returned state will
             be `None` and a subsequent call to :func:`backward` will not be
             possible.
            device (:class:`~cntk.device.DeviceDescriptor`, default `None`): the device
             descriptor that contains the type and id of the device on which the
             computation is performed. If `None`, the default device is used.

        Returns:
             A tuple (BackpropState, map of outputs to NumPy arrays). The
             BackpropState is a handle taken by :func:`backward`.
        '''
        if device is None:
            device = DeviceDescriptor.use_default_device()

        in_var_map = sanitize_var_map(self.arguments, arguments, None, device)
        output_map = {v: None for v in outputs}
        keep_for_backward = set(keep_for_backward or {})

        state = super(Function, self)._forward(in_var_map, output_map, device,
                                               keep_for_backward)

        for k in output_map:
            output_map[k] = value_to_seq(output_map[k])

        return state, output_map
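
The forward call above pairs with backward through the returned state. The following is a minimal sketch of mine, not part of the example: it keeps the output in keep_for_backward and then requests the gradient with respect to the input. The calls follow the signatures documented above, but treat the exact shapes and keyword names as assumptions.

import numpy as np
import cntk as C

# The input must be marked as needing gradients for backward to report them.
v = C.input_variable(shape=(3,), needs_gradient=True)
f = C.reciprocal(v)

# Keeping the output in keep_for_backward makes the returned state usable later.
state, fv = f.forward({v: [np.array([1., 2., 4.], dtype=np.float32)]},
                      [f.output], keep_for_backward={f.output})

# Seed the root gradient with ones (assumed layout: batch x sequence x sample)
# and ask for the gradient with respect to v; d/dx(1/x) = -1/x**2.
root_gradients = {f.output: np.ones((1, 1, 3), dtype=np.float32)}
grads = f.backward(state, root_gradients, {v})
print(grads[v])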
Example #2
    def __init__(self, inputs, as_numpy=True, name=''):
        super(UserFunction, self).__init__(inputs, name)
        self.as_numpy = as_numpy

        # Since the state will frequently not be used, we cache the None-state
        # to speed up.
        self._none_state = cntk_py.UserBackPropState(
            self, DeviceDescriptor.cpu_device(), None)

        # Memory management for user defined functions has to be controlled by
        # the C++ side. For more information:
        # http://www.swig.org/Doc3.0/Python.html#Python_nn35
        self.__disown__()
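
For context, a constructor like the one above is normally reached by subclassing UserFunction. The sketch below is my own illustration of that pattern, not code from the project; the method names follow the CNTK user-function interface as I understand it, so treat import locations and exact signatures as assumptions.

import numpy as np
import cntk as C
from cntk.ops.functions import UserFunction

class MySigmoid(UserFunction):
    def __init__(self, arg, name='MySigmoid'):
        # Delegates to the __init__ shown above, which also hands ownership to C++.
        super(MySigmoid, self).__init__([arg], name=name)

    def forward(self, argument, device=None, outputs_to_retain=None):
        result = 1 / (1 + np.exp(-argument))
        return result, result      # (state kept for backward, output value)

    def backward(self, state, root_gradients):
        s = state
        return root_gradients * s * (1 - s)

    def infer_outputs(self):
        # Output keeps the shape, dtype and dynamic axes of the input.
        return [C.output_variable(self.inputs[0].shape, self.inputs[0].dtype,
                                  self.inputs[0].dynamic_axes)]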
Example #3
File: functions.py  Project: rlugojr/CNTK
    def __init__(self, inputs, as_numpy=True, name=''):
        super(UserFunction, self).__init__(inputs, name)
        self.as_numpy = as_numpy

        # Since the state will frequently not be used, we cache the None-state
        # to speed up.
        self._none_state = cntk_py.UserBackPropState(
            self, DeviceDescriptor.cpu_device(), None)

        # Memory management for user defined functions has to be controlled by
        # the C++ side. For more information:
        # http://www.swig.org/Doc3.0/Python.html#Python_nn35
        self.__disown__()
Example #4
File: functions.py  Project: liviust/CNTK
def load_model(filename, device=None):
    '''
    Load the model in ``filename``, that has been saved using
    :func:`~cntk.ops.functions.Function.save_model`.

    Args:
        filename (str): filename to load the model from
        device (:class:`~cntk.device.DeviceDescriptor`, default is the default device):
         instance of DeviceDescriptor

    Returns:
        root node
    '''
    if not device:
        device = DeviceDescriptor.use_default_device()
    return cntk_py.Function.load_model(filename, device)
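
A small round-trip sketch of mine, not taken from the project, showing how load_model is typically paired with its save_model counterpart; the filename and the tiny graph are placeholders.

import numpy as np
import cntk as C

x = C.input_variable(shape=(2,))
z = C.times(x, np.eye(2, dtype=np.float32))

z.save_model('tmp_model.cntk')        # counterpart referenced in the docstring
z2 = load_model('tmp_model.cntk')     # device defaults to use_default_device()
print(z2.eval({z2.arguments[0]: [np.array([3., 4.], dtype=np.float32)]}))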
Example #5
File: functions.py  Project: FDecaYed/CNTK
    def load(filename, device=None):
        '''
        Load the model in ``filename``, that has been saved using
        :func:`~cntk.ops.functions.Function.save`.

        Args:
            filename (str): filename to load the model from
            device (:class:`~cntk.device.DeviceDescriptor`, default is the default device):
             instance of DeviceDescriptor

        Returns:
            root node
        '''
        if not device:
            device = DeviceDescriptor.use_default_device()
        return cntk_py.Function.load_model(filename, device)
Example #6
File: variables.py  Project: FDecaYed/CNTK
    def __init__(self, value=None, shape=None, dtype=None, device=None, name=''):

        if dtype is None:
            if isinstance(value, np.ndarray):
                dtype = value.dtype
            else:
                dtype = np.float32

        if device is None:
            device = DeviceDescriptor.use_default_device()

        if np.isscalar(value):
            super(Constant, self).__init__(sanitize_shape(shape),
                    sanitize_dtype_cntk(dtype), value, device, name)
        else:
            ndav = sanitize_value(shape, value, dtype, device)
            super(Constant, self).__init__(ndav, name)
Example #7
    def __init__(self,
                 value=None,
                 shape=None,
                 dtype=None,
                 device=None,
                 name=''):

        if dtype is None:
            if isinstance(value, np.ndarray):
                dtype = value.dtype
            else:
                dtype = np.float32

        if device is None:
            device = DeviceDescriptor.use_default_device()

        if np.isscalar(value):
            super(Constant, self).__init__(sanitize_shape(shape),
                                           sanitize_dtype_cntk(dtype), value,
                                           device, name)
        else:
            ndav = sanitize_value(shape, value, dtype, device)
            super(Constant, self).__init__(ndav, name)
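
To make the two branches in the Constant constructors above concrete, here is a brief usage sketch of mine (the values and names are placeholders): a scalar goes through the (shape, dtype, value) overload, while an array is first wrapped by sanitize_value.

import numpy as np
import cntk as C

c_scalar = C.Constant(value=2.0, shape=(3,))                   # scalar broadcast to the given shape
c_array = C.Constant(np.array([1, 2, 3], dtype=np.float32))    # dtype and shape taken from the array
print(c_scalar.shape, c_array.shape)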
Example #8
    def forward(self,
                arguments,
                outputs=None,
                keep_for_backward=None,
                device=None,
                as_numpy=True):
        '''
        Computes the values of specified variables in ``outputs``, using values
        provided in ``arguments`` that correspond to each input `Variable` of
        the function (i.e. those that have ``is_input = True``).

        Example:
            >>> # Example of passing dense data
            >>> v = C.input_variable(shape=(3,))
            >>> f = C.reciprocal(v)
            >>> _, fv = f.forward({v:[[1, 2, 4]]})
            >>> list(fv.values())[0]
            array([[[ 1.  ,  0.5 ,  0.25]]], dtype=float32)

        Example:
            >>> # Passing sparse values as one-hot with a vocabulary size of 5
            >>> vocab_size = 5
            >>> v = C.input_variable(shape=(vocab_size,), is_sparse=True)
            >>> f = C.times(v, np.eye(vocab_size))
            >>> # Passing a batch of two sequences:
            >>> # 1st sequence: word 1
            >>> # 2nd sequence: words 2 and 4
            >>> batch = [[1],[2,4]]
            >>> sparse_batch = C.one_hot(batch, vocab_size)
            >>> _, fv = f.forward({v:sparse_batch})
            >>> list(fv.values())[0]
            [array([[ 0.,  1.,  0.,  0.,  0.]], dtype=float32),
             array([[ 0.,  0.,  1.,  0.,  0.], [ 0.,  0.,  0.,  0.,  1.]], dtype=float32)]

        Example:
            >>> # Doing the same, but with a CSR matrix from scipy.sparse
            >>> vocab_size = 5
            >>> from scipy.sparse import csr_matrix
            >>> v = C.input_variable(shape=(vocab_size,), is_sparse=True)
            >>> f = C.times(v, np.eye(vocab_size))
            >>> # Note that csr_matrix automatically uses a sparse representation underneath.
            >>> sparse_batch = [csr_matrix([[0,1,0,0,0]]), csr_matrix([[0,0,1,0,0], [0,0,0,0,1]])]
            >>> _, fv = f.forward({v:sparse_batch})
            >>> list(fv.values())[0]
            [array([[ 0.,  1.,  0.,  0.,  0.]], dtype=float32),
             array([[ 0.,  0.,  1.,  0.,  0.], [ 0.,  0.,  0.,  0.,  1.]], dtype=float32)]
            <BLANKLINE>
            >>> # Much more efficient, however, is to incrementally create CSR arrays.
            >>> # See https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
            >>> # for more information.
            >>> def seq_to_csr_matrix(seq, vocab_size):
            ...     indptr = [0]
            ...     indices = []
            ...     data = []
            ...     for term_idx in seq:
            ...         indices.append(term_idx)
            ...         data.append(1)
            ...         indptr.append(len(indices))
            ...     return csr_matrix((data, indices, indptr), shape=(len(seq), vocab_size))
            >>> sparse_batch = [seq_to_csr_matrix(seq, vocab_size) for seq in batch]
            >>> _, fv = f.forward({v:sparse_batch})
            >>> list(fv.values())[0]
            [array([[ 0.,  1.,  0.,  0.,  0.]], dtype=float32),
             array([[ 0.,  0.,  1.,  0.,  0.], [ 0.,  0.,  0.,  0.,  1.]], dtype=float32)]


        Args:
            arguments: maps variables to their input data. The interpretation depends on
             the input type:

               * dict: keys are input variables or names, and values are the
                 input data. To specify a minibatch, provide a list of arrays.
                 The shape of each array must be compatible with the shape of
                 the dictionary key. If the array denotes a sequence then the
                 elements of the sequence are grouped along axis 0.
               * any other type: if the node has a unique input, arguments is
                 mapped to this input.
             For nodes with more than one input, only dict is allowed.

             In both cases, every sample in the data will be interpreted
             as a new sequence.

             Sequences can be marked as continuations of the same sequence in
             the previous minibatch (that is the sequence in the same slot).
             There are two possibilities for this:

              * specifying arguments as a `tuple` where the first element is
                used as arguments and the second one will be used as a list
                of bools, denoting whether a sequence is a new one (`True`) or a
                continuation of the sequence in the same slot of the previous
                minibatch (`False`). This will be applied to all batches.
              * specifying arguments as a dictionary of variables to tuples
                where the first element is used as arguments and the second
                one will be used as a list of bools, denoting whether a sequence
                is a new one (`True`) or a continuation of the sequence in the
                same slot of the previous minibatch (`False`). This will be
                applied to all batches.

             Data should be either NumPy arrays or a
             :class:`~cntk.io.MinibatchData` instance.
            outputs (iterable, optional): outputs to fetch values for. If not
             set, all outputs of the function will be fetched.
            keep_for_backward (set, default `None`): the subset of the
             Function's output variables for which gradients shall be calculated
             in a subsequent backward call. If `None`, the returned state will
             be `None` and a subsequent call to :func:`backward` will not be
             possible.
            device (:class:`~cntk.device.DeviceDescriptor`, default `None`): the device
             descriptor that contains the type and id of the device on which the
             computation is performed. If `None`, the default device is used.
            as_numpy (bool): whether to return the result as a NumPy array. Default True.
             Specifying this as False returns a CNTK Value which avoids a
             costly conversion but returns a somewhat opaque object.

        Returns:
             A tuple (BackPropState, map of outputs to NumPy arrays). The
             BackPropState is a handle taken by :func:`backward`.
        '''
        if device is None:
            device = DeviceDescriptor.use_default_device()

        in_var_map = sanitize_var_map(self.arguments, arguments, None, device)
        if outputs is None:
            outputs = self.outputs

        output_map = {v: None for v in outputs}
        keep_for_backward = set(keep_for_backward or {})

        state = super(Function, self)._forward(in_var_map, output_map, device,
                                               keep_for_backward)
        if as_numpy:
            for k in output_map:
                output_map[k] = variable_value_to_seq(output_map[k], k)

        return state, output_map
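
The as_numpy flag described above is worth illustrating. This is a sketch under my own assumptions rather than part of the original example: with as_numpy=False the outputs stay as CNTK Value objects, skipping the conversion done by variable_value_to_seq.

import numpy as np
import cntk as C

v = C.input_variable(shape=(3,))
f = C.reciprocal(v)

_, out = f.forward({v: [np.array([1., 2., 4.], dtype=np.float32)]}, as_numpy=False)
raw = list(out.values())[0]
print(type(raw))    # a CNTK Value, not a NumPy array; convert later only if needed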
Example #9
File: functions.py  Project: FDecaYed/CNTK
    def forward(self, arguments, outputs, keep_for_backward=None, device=None):
        '''
        Computes the values of specified variables in ``outputs``, using values
        provided in ``arguments`` that correspond to each input `Variable` of
        the function (i.e. those whose ``is_input`` is `True`).

        Example:
            >>> v = C.input_variable(shape=(3,))
            >>> f = C.reciprocal(v)
            >>> _, fv = f.forward({v:[[1, 2, 4]]}, [f.output])
            >>> list(fv.values())[0]
            array([[[ 1.  ,  0.5 ,  0.25]]], dtype=float32)

        Args:
            arguments: maps variables to their input data. The interpretation depends on
             the input type:

               * dict: keys are input variables or names, and values are the
                 input data. To specify a minibatch, provide a list of arrays.
                 The shape of each array must be compatible with the shape of
                 the dictionary key. If the array denotes a sequence then the
                 elements of the sequence are grouped along axis 0.
               * any other type: if the node has a unique input, arguments is
                 mapped to this input.
             For nodes with more than one input, only dict is allowed.

             In both cases, every sample in the data will be interpreted
             as a new sequence.

             Sequences can be marked as continuations of the same sequence in
             the previous minibatch (that is the sequence in the same slot).
             There are two possibilities for this:

              * specifying arguments as a `tuple` where the first element is
                used as arguments and the second one will be used as a list
                of bools, denoting whether a sequence is a new one (`True`) or a
                continuation of the sequence in the same slot of the previous
                minibatch (`False`). This will be applied to all batches.
              * specifying arguments as a dictionary of variables to tuples
                where the first element is used as arguments and the second
                one will be used as a list of bools, denoting whether a sequence
                is a new one (`True`) or a continuation of the sequence in the
                same slot of the previous minibatch (`False`). This will be
                applied to all batches.

             Data should be either NumPy arrays or a
             :class:`~cntk.io.MinibatchData` instance.
            outputs (iterable): outputs to fetch values for.
            keep_for_backward (set, default `None`): the subset of the
             Function's output variables for which gradients shall be calculated
             in a subsequent backward call. If `None`, the returned state will
             be `None` and a subsequent call to :func:`backward` will not be
             possible.
            device (:class:`~cntk.device.DeviceDescriptor`, default `None`): the device
             descriptor that contains the type and id of the device on which the
             computation is performed. If `None`, the default device is used.

        Returns:
             A tuple (BackPropState, map of outputs to NumPy arrays). The
             BackPropState is a handle taken by :func:`backward`.
        '''
        if device is None:
            device = DeviceDescriptor.use_default_device()

        in_var_map = sanitize_var_map(self.arguments, arguments,
                                      None, device)
        output_map = {v: None for v in outputs}
        keep_for_backward = set(keep_for_backward or {})

        state = super(Function, self)._forward(in_var_map, output_map, device,
                                               keep_for_backward)

        for k in output_map:
            output_map[k] = variable_value_to_seq(output_map[k], k)

        return state, output_map
Example #10
File: functions.py  Project: rlugojr/CNTK
    def forward(self, arguments, outputs=None, keep_for_backward=None, device=None, as_numpy=True):
        '''
        Computes the values of specified variables in ``outputs``, using values
        provided in ``arguments`` that correspond to each input `Variable` of
        the function (i.e. those that have ``is_input = True``).

        Example:
            >>> # Example of passing dense data
            >>> v = C.input_variable(shape=(3,))
            >>> f = C.reciprocal(v)
            >>> _, fv = f.forward({v:[[1, 2, 4]]})
            >>> list(fv.values())[0]
            array([[[ 1.  ,  0.5 ,  0.25]]], dtype=float32)

        Example:
            >>> # Passing sparse values as one-hot with a vocabulary size of 5
            >>> vocab_size = 5
            >>> v = C.input_variable(shape=(vocab_size,), is_sparse=True)
            >>> f = C.times(v, np.eye(vocab_size))
            >>> # Passing a batch of two sequences:
            >>> # 1st sequence: word 1
            >>> # 2nd sequence: words 2 and 4
            >>> batch = [[1],[2,4]]
            >>> sparse_batch = C.one_hot(batch, vocab_size)
            >>> _, fv = f.forward({v:sparse_batch})
            >>> list(fv.values())[0]
            [array([[ 0.,  1.,  0.,  0.,  0.]], dtype=float32),
             array([[ 0.,  0.,  1.,  0.,  0.], [ 0.,  0.,  0.,  0.,  1.]], dtype=float32)]

        Example:
            >>> # Doing the same, but with a CSR matrix from scipy.sparse
            >>> vocab_size = 5
            >>> from scipy.sparse import csr_matrix
            >>> v = C.input_variable(shape=(vocab_size,), is_sparse=True)
            >>> f = C.times(v, np.eye(vocab_size))
            >>> # Note that csr_matrix automatically uses a sparse representation underneath.
            >>> sparse_batch = [csr_matrix([[0,1,0,0,0]]), csr_matrix([[0,0,1,0,0], [0,0,0,0,1]])]
            >>> _, fv = f.forward({v:sparse_batch})
            >>> list(fv.values())[0]
            [array([[ 0.,  1.,  0.,  0.,  0.]], dtype=float32),
             array([[ 0.,  0.,  1.,  0.,  0.], [ 0.,  0.,  0.,  0.,  1.]], dtype=float32)]
            <BLANKLINE>
            >>> # Much more efficient, however, is to incrementally create CSR arrays.
            >>> # See https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
            >>> # for more information.
            >>> def seq_to_csr_matrix(seq, vocab_size):
            ...     indptr = [0]
            ...     indices = []
            ...     data = []
            ...     for term_idx in seq:
            ...         indices.append(term_idx)
            ...         data.append(1)
            ...         indptr.append(len(indices))
            ...     return csr_matrix((data, indices, indptr), shape=(len(seq), vocab_size))
            >>> sparse_batch = [seq_to_csr_matrix(seq, vocab_size) for seq in batch]
            >>> _, fv = f.forward({v:sparse_batch})
            >>> list(fv.values())[0]
            [array([[ 0.,  1.,  0.,  0.,  0.]], dtype=float32),
             array([[ 0.,  0.,  1.,  0.,  0.], [ 0.,  0.,  0.,  0.,  1.]], dtype=float32)]


        Args:
            arguments: maps variables to their input data. The interpretation depends on
             the input type:

               * dict: keys are input variables or names, and values are the
                 input data. To specify a minibatch, provide a list of arrays.
                 The shape of each array must be compatible with the shape of
                 the dictionary key. If the array denotes a sequence then the
                 elements of the sequence are grouped along axis 0.
               * any other type: if the node has a unique input, arguments is
                 mapped to this input.
             For nodes with more than one input, only dict is allowed.

             In both cases, every sample in the data will be interpreted
             as a new sequence.

             Sequences can be marked as continuations of the same sequence in
             the previous minibatch (that is the sequence in the same slot).
             There are two possibilities for this:

              * specifying arguments as a `tuple` where the first element is
                used as arguments and the second one will be used as a list
                of bools, denoting whether a sequence is a new one (`True`) or a
                continuation of the sequence in the same slot of the previous
                minibatch (`False`). This will be applied to all batches.
              * specifying arguments as a dictionary of variables to tuples
                where the first element is used as arguments and the second
                one will be used as a list of bools, denoting whether a sequence
                is a new one (`True`) or a continuation of the sequence in the
                same slot of the previous minibatch (`False`). This will be
                applied to all batches.

             Data should be either NumPy arrays or a
             :class:`~cntk.io.MinibatchData` instance.
            outputs (iterable, optional): outputs to fetch values for. If not
             set, all outputs of the function will be fetched.
            keep_for_backward (set, default `None`): the subset of the
             Function's output variables for which gradients shall be calculated
             in a subsequent backward call. If `None`, the returned state will
             be `None` and a subsequent call to :func:`backward` will not be
             possible.
            device (:class:`~cntk.device.DeviceDescriptor`, default `None`): the device
             descriptor that contains the type and id of the device on which the
             computation is performed. If `None`, the default device is used.
            as_numpy (bool): whether to return the result as a NumPy array. Default True.
             Specifying this as False returns a CNTK Value which avoids a
             costly conversion but returns a somewhat opaque object.

        Returns:
             A tuple (BackPropState, map of outputs to NumPy arrays). The
             BackPropState is a handle taken by :func:`backward`.
        '''
        if device is None:
            device = DeviceDescriptor.use_default_device()

        in_var_map = sanitize_var_map(self.arguments, arguments,
                                      None, device)
        if outputs is None:
            outputs = self.outputs

        output_map = {v: None for v in outputs}
        keep_for_backward = set(keep_for_backward or {})

        state = super(Function, self)._forward(in_var_map, output_map, device,
                                               keep_for_backward)
        if as_numpy:
            for k in output_map:
                output_map[k] = variable_value_to_seq(output_map[k], k)

        return state, output_map
Example #11
File: functions.py  Project: smdvw5/CNTK
    def forward(self,
                arguments,
                outputs,
                keep_for_backward=None,
                device=None,
                as_numpy=True):
        '''
        Computes the values of specified variables in ``outputs``, using values
        provided in ``arguments`` that correspond to each input `Variable` of
        the function (i.e. those whose ``is_input`` is `True`).

        Example:
            >>> v = C.input_variable(shape=(3,))
            >>> f = C.reciprocal(v)
            >>> _, fv = f.forward({v:[[1, 2, 4]]}, [f.output])
            >>> list(fv.values())[0]
            array([[[ 1.  ,  0.5 ,  0.25]]], dtype=float32)

        Args:
            arguments: maps variables to their input data. The interpretation depends on
             the input type:

               * dict: keys are input variables or names, and values are the
                 input data. To specify a minibatch, provide a list of arrays.
                 The shape of each array must be compatible with the shape of
                 the dictionary key. If the array denotes a sequence then the
                 elements of the sequence are grouped along axis 0.
               * any other type: if the node has a unique input, arguments is
                 mapped to this input.
             For nodes with more than one input, only dict is allowed.

             In both cases, every sample in the data will be interpreted
             as a new sequence.

             Sequences can be marked as continuations of the same sequence in
             the previous minibatch (that is the sequence in the same slot).
             There are two possibilities for this:

              * specifying arguments as a `tuple` where the first element is
                used as arguments and the second one will be used as a list
                of bools, denoting whether a sequence is a new one (`True`) or a
                continuation of the sequence in the same slot of the previous
                minibatch (`False`). This will be applied to all batches.
              * specifying arguments as a dictionary of variables to tuples
                where the first element is used as arguments and the second
                one will be used as a list of bools, denoting whether a sequence
                is a new one (`True`) or a continuation of the sequence in the
                same slot of the previous minibatch (`False`). This will be
                applied to all batches.

             Data should be either NumPy arrays or a
             :class:`~cntk.io.MinibatchData` instance.
            outputs (iterable): outputs to fetch values for.
            keep_for_backward (set, default `None`): the subset of the
             Function's output variables for which gradients shall be calculated
             in a subsequent backward call. If `None`, the returned state will
             be `None` and a subsequent call to :func:`backward` will not be
             possible.
            device (:class:`~cntk.device.DeviceDescriptor`, default `None`): the device
             descriptor that contains the type and id of the device on which the
             computation is performed. If `None`, the default device is used.
            as_numpy (bool): whether to return the result as a NumPy array. Default True.
             Specifying this as False returns a CNTK Value which avoids a 
             costly conversion but returns a somewhat opaque object.

        Returns:
             A tuple (BackPropState, map of outputs to NumPy arrays). The
             BackPropState is a handle taken by :func:`backward`.
        '''
        if device is None:
            device = DeviceDescriptor.use_default_device()

        in_var_map = sanitize_var_map(self.arguments, arguments, None, device)
        output_map = {v: None for v in outputs}
        keep_for_backward = set(keep_for_backward or {})

        state = super(Function, self)._forward(in_var_map, output_map, device,
                                               keep_for_backward)
        if as_numpy:
            for k in output_map:
                output_map[k] = variable_value_to_seq(output_map[k], k)

        return state, output_map
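
Finally, the tuple form of ``arguments`` described in these docstrings can be sketched as follows. This is my own illustration with placeholder data; note that the continuation flags only have an effect when the graph actually carries state across minibatches (for example through past_value), which a simple reciprocal does not.

import numpy as np
import cntk as C

v = C.input_variable(shape=(1,))
f = C.reciprocal(v)

first = [np.array([[1.]], dtype=np.float32), np.array([[2.]], dtype=np.float32)]
second = [np.array([[4.]], dtype=np.float32), np.array([[8.]], dtype=np.float32)]

_, out1 = f.forward({v: first}, [f.output])
# (arguments, list of bools): False marks the sequence in that slot as a
# continuation of the sequence from the previous minibatch.
_, out2 = f.forward(({v: second}, [False, False]), [f.output])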