def past_value(shape, x, time_step=1, default_hidden_activation=0.1, name=None):
    """
    This function returns the past value wrt `x`. It is most often used when
    creating RNNs. The resulting tensor has the same shape as the input but is
    the previous logical sample. The `time_step` parameter is the number of
    steps to look into the past and is 1 by default. If there is no past value
    (i.e. the current sample is the first one in the tensor) then the
    `default_hidden_activation` value is returned which is 0.1 by default.

    Example:
        >>> data = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12]])
        >>> t = C.dynamic_axis(name='t')
        >>> x = C.input_numpy([data], dynamic_axis=t)
        >>> with C.LocalExecutionContext('past_value') as ctx:
        ...     print(ctx.eval(C.past_value(0, x)))
        [array([[ 0.1,  0.1,  0.1,  0.1],
                [ 1. ,  2. ,  3. ,  4. ],
                [ 5. ,  6. ,  7. ,  8. ]])]

    Args:
        shape: dimensions of the input `x`
        x: the tensor from which the past value is obtained
        time_step: the number of time steps to look into the past (default 1)
        default_hidden_activation: the default value to use when no past value
         is available (default 0.1)
        name: the name of the node in the network (default None)

    Returns:
        :class:`cntk.graph.ComputationNode`
    """
    # Imported lazily, matching the file's convention for cntk1 op wrappers.
    from cntk.ops.cntk1 import PastValue
    # was `name = name`: keyword arguments take no spaces around `=` (PEP 8),
    # consistent with the other op factories in this module.
    op = PastValue(shape, x, time_step, default_hidden_activation, name=name)
    # Wrap any raw numpy inputs into constant nodes so the graph is well-formed.
    wrap_numpy_arrays(op)
    # Record the output rank on the node; derived from the declared shape.
    op.rank = get_rank(shape)
    return op
def past_value(dims, x, time_step=1, default_hidden_activation=0.1, name=None):
    """
    Return the past value wrt `x`, most often used when creating RNNs.

    The resulting tensor has the same shape as the input but holds the
    previous logical sample. `time_step` gives how many steps to look into
    the past (1 by default). When no past value exists — i.e. the current
    sample is the first one in the tensor — `default_hidden_activation`
    (0.1 by default) is substituted instead.

    Example:
        >>> data = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12]])
        >>> t = C.dynamic_axis(name='t')
        >>> x = C.input_numpy([data], dynamic_axis=t)
        >>> with C.LocalExecutionContext('past_value') as ctx:
        ...     print(ctx.eval(C.past_value(0, x)))
        [array([[ 0.1,  0.1,  0.1,  0.1],
                [ 1. ,  2. ,  3. ,  4. ],
                [ 5. ,  6. ,  7. ,  8. ]])]

    Args:
        dims: dimensions of the input `x`
        x: the tensor from which the past value is obtained
        time_step: the number of time steps to look into the past (default 1)
        default_hidden_activation: the default value to use when no past value
         is available (default 0.1)

    Returns:
        :class:`cntk.graph.ComputationNode`
    """
    # Lazy import keeps module load light, matching the file's convention.
    from cntk.ops.cntk1 import PastValue
    node = PastValue(dims, x, time_step, default_hidden_activation, name=name)
    return node
def test_loose_coupling():
    # A recurrent loop built by name: PastValue references 'outnode' by its
    # variable name before that node is created, and the Times node then
    # claims the name — exercising the loosely-coupled wiring in to_config().
    from cntk.ops.cntk1 import PastValue

    delayed = PastValue(1, 'outnode')
    root = Times(delayed, Constant(2), var_name='outnode')

    expected_config = [
        'v0 = PastValue(1, outnode, timeStep=1, defaultHiddenActivation=0.1)',
        'v1 = Constant(2, rows=1, cols=1)',
        'outnode = Times(v0, v1, outputRank=1)',
    ]

    description, has_inputs, readers = root.to_config()
    assert _to_list(description) == expected_config
def test_loose_coupling():
    # Same loosely-coupled recurrence as above, but through the lowercase
    # operator API: constant(2) is emitted as a ParameterTensor in the config.
    from cntk.ops.cntk1 import PastValue

    delayed = PastValue(1, 'outnode')
    root = times(delayed, constant(2), name='outnode')

    expected_config = [
        'v0 = PastValue(1, outnode, timeStep=1, defaultHiddenActivation=0.1)',
        "v1 = ParameterTensor(1, learningRateMultiplier=0.0, init='fromLiteral', initValueScale=1, value=0, initFromFilePath='', initFromLiteral='2.0000",
        "', initOnCPUOnly=true, randomSeed=-1)",
        'outnode = Times(v0, v1, outputRank=1)',
    ]

    description, has_inputs, readers = root.to_config()
    assert _to_list(description) == expected_config