Example #1
    def __radd__(self, other):
        """Calculate y + x.

        Parameters
        ----------
        other : Tensor, np.ndarray, list or number
            The left operand y.

        Returns
        -------
        Tensor
            The output tensor.

        """
        if not isinstance(other, Tensor):
            # wrap numbers / lists / ndarrays into a temporary fed Tensor
            if not isinstance(other, np.ndarray):
                if not isinstance(other, list): other = [other]
            other = np.array(other, dtype=np.float32)
            # feed the value into the workspace under a fresh unique name
            tensor = Tensor(GetTensorName())
            ws.FeedTensor(tensor, other)
            other = tensor
        output = self.CreateOperator(inputs=[other, self],
                                     nout=1,
                                     op_type='RAdd')
        # propagate a copy of this tensor's shape to the output
        if self.shape is not None:
            output.shape = self.shape[:]
        return output
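
For context, Python falls back to ``__radd__`` only when the left operand's own ``__add__`` fails, which is what makes scalar-first expressions work on these symbolic tensors. A short usage sketch (the tensor name is illustrative):

x = Tensor('x').Variable()
y = 2.0 + x  # float.__add__ returns NotImplemented, so x.__radd__(2.0) runs;
             # the scalar is fed as a new workspace tensor and an 'RAdd' op is recorded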
Example #2
 def name(self, value):
     from .scope import _TENSOR_SCOPE
     if value is None:
         # names generated from a uid ignore the current scope
         self._name = GetTensorName()
     else:
         self._name = _TENSOR_SCOPE + value
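
A sketch of the behavior this setter gives, assuming it backs a ``name`` property and that ``_TENSOR_SCOPE`` currently holds a prefix such as 'conv1/' (both the property wiring and the prefix value are assumptions):

t.name = 'weight'  # stored as _TENSOR_SCOPE + 'weight', e.g. 'conv1/weight'
t.name = None      # regenerated from a uid; the scope prefix is skipped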
Example #3
 def __div__(self, other):
     if not isinstance(other, Tensor):
         # wrap numbers / lists / ndarrays into a temporary fed Tensor (cf. __radd__)
         if not isinstance(other, np.ndarray):
             if not isinstance(other, list): other = [other]
         other = np.array(other, dtype=np.float32)
         tensor = Tensor(GetTensorName())
         ws.FeedTensor(tensor, other)
         other = tensor
     output = self.CreateOperator(inputs=[self, other], nout=1, op_type='Div')
     if self.shape is not None:
         output.shape = self.shape[:]
     return output
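
One caveat worth flagging: ``__div__`` is only consulted by Python 2; Python 3 dispatches the / operator to ``__truediv__``. If the class must support both interpreters, a common pattern (a sketch, not taken from this source) is to alias the method:

class Tensor(object):
    def __div__(self, other):
        ...  # body as in the example above
    __truediv__ = __div__  # Python 3 routes / here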
Example #4
 def name(self, value):
     from .scope import TENSOR_SCOPE
     if value is None: self._name = TENSOR_SCOPE + GetTensorName()
     else: self._name = TENSOR_SCOPE + value
Example #5
 def wrapper_indices(indices):
     # feed the given indices into the workspace as a float32 tensor
     tensor = Tensor(GetTensorName())
     ws.FeedTensor(tensor, np.array(indices, dtype=np.float32))
     return tensor
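
A one-line usage sketch: the helper turns a plain index list into a named tensor fed into the workspace. Note the indices are stored as float32 rather than an integer dtype, matching how the other wrappers above feed values:

idx = wrapper_indices([0, 2, 5])  # Tensor backed by np.array([0., 2., 5.], dtype=np.float32)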
Example #6
def function(inputs=None, outputs=None, givens=None, updater=None):
    """Return a callable function that will compute ``outputs`` or apply ``updater``.

    Set ``inputs`` to feed inputs into this callable function.

    Set ``givens`` to substitute some tensors before making the computation graph.

    Set ``updater`` to build the update graph; the update targets must be generated beforehand.

    Parameters
    ----------
    inputs : Tensor, list of Tensor or None
        The inputs to feed.
    outputs : Tensor, list of Tensor or None
        The outputs to solve.
    givens : dict or None
        The substitutions to use.
    updater : BaseUpdater or None
        The updater to use.

    Returns
    -------
    function
        The callable function.

    Examples
    --------
    >>> x = Tensor('x').Variable()
    >>> y = x * 2
    >>> f = theano.function(outputs=y)
    >>> x.set_value(np.ones((2, 3), dtype=np.float32))
    >>> print(f())
    [[ 2.  2.  2.]
     [ 2.  2.  2.]]

    >>> f = theano.function(inputs=x, outputs=y)
    >>> print(f(np.ones((2, 3), dtype=np.float32)))
    [[ 2.  2.  2.]
     [ 2.  2.  2.]]

    """
    if not isinstance(inputs, list):
        if inputs is None:
            inputs = []
        else:
            inputs = [inputs]
    if not isinstance(outputs, list):
        if outputs is None:
            outputs = []
        else:
            outputs = [outputs]

    if len(outputs) > 0 and updater is not None:
        raise RuntimeError(
            'You can specify either outputs or updater, not both.')

    all_exprs = {}
    all_extra_targets = set()

    meta_graph = pb.GraphDef()

    meta_graph.name = 'Graph_' + str(ws.CURRENT_GRAPH_IDX)
    ws.CURRENT_GRAPH_IDX += 1

    # extract operators and targets from expressions
    existing_grads = False
    for output in outputs:
        meta_graph.target.extend([output.name])
        if sys.version_info >= (3, 0):
            all_exprs = OrderedDict(all_exprs, **output.expressions)
        else:
            all_exprs = dict(all_exprs, **output.expressions)
        all_extra_targets = all_extra_targets.union(output.extra_targets)
        if len(output.grad_wrts) > 0: existing_grads = True

    # sort the operators into topological order (by expression key) before use
    all_exprs = sorted(all_exprs.items(), key=lambda d: d[0])
    forward_ops = copy.deepcopy([v for k, v in all_exprs])

    # handle givens
    if givens is not None:
        name_dict = {}
        external_input_exprs = {}

        for old_tensor, new_tensor in givens.items():
            if isinstance(new_tensor, Tensor):
                name_dict[old_tensor.name] = new_tensor._name
                if sys.version_info >= (3, 0):
                    external_input_exprs = OrderedDict(
                        external_input_exprs, **new_tensor.expressions)
                else:
                    external_input_exprs = dict(external_input_exprs,
                                                **new_tensor.expressions)
                external_input_exprs = OrderedDict(
                    sorted(external_input_exprs.items(), key=lambda d: d[0]))
                all_extra_targets = all_extra_targets.union(
                    new_tensor.extra_targets)
            elif isinstance(new_tensor, np.ndarray):
                # FeedTensor takes (tensor_or_name, value): feed the array
                # under a fresh unique name
                ws.FeedTensor(GetTensorName(), new_tensor)
        external_input_ops = [v for k, v in external_input_exprs.items()]
        for op in forward_ops:
            # append the remapped input names, then drop the original first half
            op.input.extend([
                name_dict[input] if input in name_dict else input
                for input in op.input
            ])
            del op.input[:len(op.input) // 2]

        forward_ops = external_input_ops + forward_ops

    # handle grads
    if existing_grads:
        targets = [output.name for output in outputs]
        targets.extend(all_extra_targets)
        forward_ops, grad_ops = GraphGradientMaker.Make(forward_ops, targets)
    else:
        grad_ops = []

    # Write Ops
    meta_graph.op.extend(forward_ops + grad_ops)

    # Write Extra Targets
    for extra_target in all_extra_targets:
        meta_graph.target.extend([extra_target])

    # Write Misc
    if len(outputs) > 0:
        GraphDef_Device(meta_graph)
        GraphDef_Opt(meta_graph)
        GraphDef_Grad(meta_graph, outputs)
        GraphDef_Phase(meta_graph, outputs)

    elif updater is not None:
        GraphDef_Device(meta_graph)
        GraphDef_Opt(meta_graph)
        GraphDef_Update(meta_graph, updater)

    # call the C API to create the graph
    ws.CreateGraph(meta_graph)

    # return a lambda that runs this graph
    return lambda *args, **kwargs: \
        ws.RunGraph(meta_graph.name, (inputs, args), outputs, **kwargs)
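
The docstring shows the ``inputs``/``outputs`` paths; below is a sketch of the ``givens`` path, which rewires the graph to read from a substitute tensor before compilation (tensor names are illustrative; the API calls are the ones documented above):

x = Tensor('x').Variable()
x2 = Tensor('x2').Variable()
y = x * 2
f = theano.function(outputs=y, givens={x: x2})  # the compiled graph reads from 'x2'
x2.set_value(np.ones((2, 3), dtype=np.float32))
print(f())  # [[ 2.  2.  2.]
            #  [ 2.  2.  2.]]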
Example #7
def function(inputs=[], outputs=[], swaps=None, updater=None):
    """ return a excutable function for a graph """
    if not isinstance(inputs, list): inputs = [inputs]
    if not isinstance(outputs, list): outputs = [outputs]
    if len(outputs) > 0 and updater is not None:
        raise RuntimeError('You can specify either outputs or updater, not both.')

    all_exprs = {}
    all_extra_targets = set()

    graph_def = pb.GraphDef()

    graph_def.name = 'Graph_' + str(ws.CURRENT_GRAPH_IDX)
    ws.CURRENT_GRAPH_IDX += 1

    # extract operators and targets from expressions
    existing_grads = False
    for output in outputs:
        graph_def.target.extend([output.name])
        if sys.version_info >= (3, 0):
            all_exprs = OrderedDict(all_exprs, **output.expressions)
        else:
            all_exprs = dict(all_exprs, **output.expressions)
        all_extra_targets = all_extra_targets.union(output.extra_targets)
        if len(output.grad_wrts) > 0: existing_grads = True
    for extra_target in all_extra_targets:
        graph_def.target.extend([extra_target])

    # sort the operators into topological order (by expression key) before use
    all_exprs = sorted(all_exprs.items(), key=lambda d: d[0])
    forward_ops = copy.deepcopy([v for k, v in all_exprs])

    # handle swap
    if swaps is not None:
        name_dict = {}
        external_input_exprs = {}

        for old_tensor, new_tensor in swaps.items():
            if isinstance(new_tensor, Tensor):
                name_dict[old_tensor.name] = new_tensor._name
                if sys.version_info >= (3, 0):
                    external_input_exprs = OrderedDict(
                        external_input_exprs, **new_tensor.expressions)
                else:
                    external_input_exprs = dict(external_input_exprs,
                                                **new_tensor.expressions)
            elif isinstance(new_tensor, np.ndarray):
                # FeedTensor takes (tensor_or_name, value): feed the array
                # under a fresh unique name
                ws.FeedTensor(GetTensorName(), new_tensor)
        external_input_ops = [v for k, v in external_input_exprs.items()]
        for op in forward_ops:
            # append the remapped input names, then drop the original first half
            op.input.extend([
                name_dict[input] if input in name_dict else input
                for input in op.input
            ])
            del op.input[:len(op.input) // 2]

        forward_ops = external_input_ops + forward_ops

    # handle grads
    if existing_grads:
        targets = [output.name for output in outputs]
        forward_ops, grad_ops = GraphGradientMaker.Make(forward_ops, targets)
    else:
        grad_ops = []
    graph_def.op.extend(forward_ops + grad_ops)

    if len(outputs) > 0:
        GraphDef_Device(graph_def)
        GraphDef_Opt(graph_def)
        GraphDef_Grad(graph_def, outputs)
        GraphDef_Phase(graph_def, outputs)

    elif updater is not None:
        GraphDef_Device(graph_def)
        GraphDef_Opt(graph_def)
        GraphDef_Update(graph_def, updater)

    # call the C API to create the graph
    ws.CreateGraph(graph_def)

    # return a lambda that runs this graph
    return lambda *args, **kwargs: \
        ws.RunGraph(graph_def.name, (inputs, args), outputs, **kwargs)
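
A sketch of the ``updater`` path, which compiles an update graph instead of solving outputs; ``SGDUpdater`` is a hypothetical stand-in for any ``BaseUpdater`` subclass whose update targets were generated beforehand:

updater = SGDUpdater(base_lr=0.01)  # hypothetical BaseUpdater subclass
train_step = function(updater=updater)
train_step()  # one run of the update graph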
Example #8
    def define(self, inputs=None, outputs=None, givens=None, updater=None):
        if not isinstance(inputs, list):
            if inputs is None:
                inputs = []
            else:
                inputs = [inputs]
        if not isinstance(outputs, list):
            if outputs is None:
                outputs = []
            else:
                outputs = [outputs]

        if len(outputs) > 0 and updater is not None:
            raise RuntimeError(
                'You can specify either outputs or updater, not both.')

        all_expressions = dict()
        all_extra_targets = set()

        meta_graph = self.meta_graph

        # Extract operators and targets from expressions
        existing_grads = False
        for output in outputs:
            meta_graph.target.extend([output.name])
            all_expressions.update(output.expressions)
            all_extra_targets = all_extra_targets.union(output.extra_targets)
            if len(output.grad_wrts) > 0: existing_grads = True

        # Sort the operators into topological order (by expression key) before use
        all_exprs = sorted(all_expressions.items(), key=lambda d: d[0])
        forward_ops = copy.deepcopy([v for k, v in all_exprs])

        # Handle givens
        if givens is not None:
            name_dict = {}
            external_input_expressions = {}
            # Extract new ops
            for old_tensor, new_tensor in givens.items():
                if isinstance(new_tensor, Tensor):
                    name_dict[old_tensor.name] = new_tensor.name
                    external_input_expressions.update(new_tensor.expressions)
                    all_extra_targets = all_extra_targets.union(
                        new_tensor.extra_targets)
                elif isinstance(new_tensor, np.ndarray):
                    # FeedTensor takes (tensor_or_name, value): feed the array
                    # under a fresh unique name
                    ws.FeedTensor(GetTensorName(), new_tensor)
            external_input_expressions = sorted(
                external_input_expressions.items(), key=lambda d: d[0])
            external_input_ops = [v for k, v in external_input_expressions]
            # Update original ops
            for op in forward_ops:
                op.input.extend([
                    name_dict[input] if input in name_dict else input
                    for input in op.input
                ])
                # drop the original (un-remapped) first half of the inputs
                del op.input[:len(op.input) // 2]
            # Concat them together
            forward_ops = external_input_ops + forward_ops

        # Handle grads
        if existing_grads:
            targets = [output.name for output in outputs]
            targets.extend(all_extra_targets)
            forward_ops, grad_ops, _ = \
                GraphGradientMaker.Make(forward_ops, targets)
        else:
            grad_ops = []

        # Write Ops
        meta_graph.op.extend(forward_ops + grad_ops)

        # Write Extra Targets
        for extra_target in all_extra_targets:
            meta_graph.target.extend([extra_target])

        # Write Misc
        if len(outputs) > 0:
            GraphDef_Device(meta_graph)
            GraphDef_Opt(meta_graph)
            GraphDef_Grad(meta_graph, outputs)
            GraphDef_Phase(meta_graph, outputs)

        elif updater is not None:
            GraphDef_Device(meta_graph)
            GraphDef_Opt(meta_graph)
            GraphDef_Update(meta_graph, updater)

        # Call the C API to create the graph
        ws.CreateGraph(meta_graph)

        # Bind a lambda callback to run this graph
        self.callback = lambda *args, **kwargs: \
            ws.RunGraph(meta_graph.name, (inputs, args), outputs, **kwargs)

        # Return self to allow chaining
        return self
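
Because ``define`` binds the run lambda to ``self.callback`` and returns ``self``, construction and execution chain naturally. A sketch, where ``FunctionGuard`` is a hypothetical name for the class that owns ``define``:

fn = FunctionGuard().define(outputs=y)  # compile the graph, keep the handle
result = fn.callback()                  # run it through the bound callback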