Example #1
    def import_from(self, graph_def, explicit_inputs=False):
        """Import the defined function from a graph def.

        Set ``explicit_inputs`` to ``False`` if you want to feed
        the inputs manually via ``workspace.FeedTensor(self.inputs)``.

        Parameters
        ----------
        graph_def : GraphDef
            The definition of the graph.
        explicit_inputs : boolean
            Whether to enforce feeding the inputs on execution.

        Returns
        -------
        Function
            The self.

        """
        self.inputs = [_Tensor(input).Variable() for input in graph_def.input]
        self.outputs = [_Tensor(output) for output in graph_def.output]

        _inject_device(graph_def)
        _inject_optimization(graph_def)
        _inject_phase(graph_def, self.outputs)

        # Store for future development
        self.meta_graph = graph_def

        # Call c api to create graph
        self.graph_name = _workspace.CreateGraph(graph_def)

        # Bind a lambda callback to run this graph
        callback_inputs = self.inputs if explicit_inputs else []
        self.callback = lambda *args, **kwargs: \
            _workspace.RunGraph(
                self.graph_name,
                (callback_inputs, args),
                self.outputs, **kwargs)

        # Return self
        return self
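
A minimal usage sketch for ``import_from`` (hedged: the ``Function`` class name, the ``graph_def`` value, and the workspace handle are assumptions, not shown in this listing):

# Hypothetical usage; `Function` and `graph_def` stand in for the
# host class and a deserialized GraphDef, which this listing omits.
import numpy as np

f = Function().import_from(graph_def, explicit_inputs=True)
y = f.callback(np.ones((2, 3), dtype=np.float32))  # inputs fed from args

# With explicit_inputs=False, feed the inputs through the workspace instead:
# _workspace.FeedTensor(f.inputs[0], np.ones((2, 3), dtype=np.float32))
# y = f.callback()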
Example #2
def native_run_graph(graph_def, inputs, initializer, init_func=None):
    # Disable graph optimizations by forcing optimization_level to 0
    for i in range(len(graph_def.arg)):
        if graph_def.arg[i].name == 'optimization_level':
            graph_def.arg[i].i = 0

    # Create an anonymous workspace
    ws = _workspace.Workspace()

    with ws.as_default():
        # Register all the initializers before feeding them
        for name in initializer:
            _Tensor(name=name).Variable()

        # Feed the given values if necessary
        if init_func: init_func()

        # Feed the external inputs
        for name, blob in inputs.items():
            _workspace.FeedTensor(name, blob)

        # Create and Run the graph
        graph_name = _workspace.CreateGraph(graph_def)
        _workspace.RunGraph(graph_name, return_outputs=False)

        # Fetch the outputs
        output_names = graph_def.output
        output_values = [_workspace.FetchTensor(name) for name in output_names]

        # Fetch the initializers
        initializer = [
            numpy_helper.from_array(
                _workspace.FetchTensor(name), name=name)
            for name in initializer
        ]

    # Return the workspace, the named outputs, and the initializers
    return ws, namedtupledict('Outputs', output_names)(*output_values), initializer
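
A hedged usage sketch for ``native_run_graph``; the tensor names and the ``graph_def`` value below are illustrative, not taken from this listing:

# Hypothetical: `graph_def` is a GraphDef whose graph reads 'data'
# and declares 'weight' among its initializers.
import numpy as np

ws, outputs, initializer = native_run_graph(
    graph_def,
    inputs={'data': np.ones((1, 3), dtype=np.float32)},
    initializer=['weight'],
)
print(outputs[0])   # `outputs` is a namedtuple: index it or use field names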
Example #3
def function(inputs=None, outputs=None, givens=None, updater=None):
    """Return a callable function that will compute ``outputs`` or apply ``updater``.

    Set ``inputs`` to feed inputs into this callable function.

    Set ``givens`` to substitute some tensors before making the computation graph.

    Set ``updater`` to build the update graph; the update targets must be generated beforehand.

    Parameters
    ----------
    inputs : Tensor, list of Tensor or None
        The inputs to feed.
    outputs : Tensor, list of Tensor or None
        The outputs to solve.
    givens : dict or None
        The substitutions to use.
    updater : BaseUpdater or None
        The updater to use.

    Returns
    -------
    function
        The callable function.

    Examples
    --------
    >>> x = Tensor('x').Variable()
    >>> y = x * 2
    >>> f = theano.function(outputs=y)
    >>> x.set_value(np.ones((2, 3), dtype=np.float32))
    >>> print(f())
    [[ 2.  2.  2.]
     [ 2.  2.  2.]]

    >>> f = theano.function(inputs=x, outputs=y)
    >>> print(f(np.ones((2, 3), dtype=np.float32)))
    [[ 2.  2.  2.]
     [ 2.  2.  2.]]

    """
    if not isinstance(inputs, list):
        if inputs is None:
            inputs = []
        else:
            inputs = [inputs]
    if not isinstance(outputs, list):
        if outputs is None:
            outputs = []
        else:
            outputs = [outputs]

    if len(outputs) > 0 and updater is not None:
        raise RuntimeError(
            'You can specify either outputs or updater, not both.')

    all_exprs = {}
    all_extra_targets = set()

    meta_graph = pb.GraphDef()

    meta_graph.name = 'Graph_' + str(ws.CURRENT_GRAPH_IDX)
    ws.CURRENT_GRAPH_IDX += 1

    # extract operators and targets from expressions
    existing_grads = False
    for output in outputs:
        meta_graph.target.extend([output.name])
        if sys.version_info >= (3, 0):
            all_exprs = OrderedDict(all_exprs, **output.expressions)
        else:
            all_exprs = dict(all_exprs, **output.expressions)
        all_extra_targets = all_extra_targets.union(output.extra_targets)
        if len(output.grad_wrts) > 0: existing_grads = True

    # Sort the operators into topological order before use
    all_exprs = sorted(all_exprs.items(), key=lambda d: d[0])
    forward_ops = copy.deepcopy([v for k, v in all_exprs])

    # handle givens
    if givens is not None:
        name_dict = {}
        external_input_exprs = {}

        for old_tensor, new_tensor in givens.items():
            if isinstance(new_tensor, Tensor):
                name_dict[old_tensor.name] = new_tensor._name
                if sys.version_info >= (3, 0):
                    external_input_exprs = OrderedDict(
                        external_input_exprs, **new_tensor.expressions)
                else:
                    external_input_exprs = dict(external_input_exprs,
                                                **new_tensor.expressions)
                external_input_exprs = OrderedDict(
                    sorted(external_input_exprs.items(), key=lambda A: A[0]))
                # Only tensors carry extra targets; ndarrays have none
                all_extra_targets = all_extra_targets.union(
                    new_tensor.extra_targets)
            elif isinstance(new_tensor, np.ndarray):
                # Feed the array under a fresh tensor name (name, then value)
                ws.FeedTensor(GetTensorName(), new_tensor)
        external_input_ops = [v for k, v in external_input_exprs.items()]
        for op in forward_ops:
            # Append the (possibly renamed) inputs, then drop the originals
            op.input.extend([
                name_dict[input] if input in name_dict else input
                for input in op.input
            ])
            del op.input[:len(op.input) // 2]

        forward_ops = external_input_ops + forward_ops

    # handle grads
    if existing_grads:
        targets = [output.name for output in outputs]
        targets.extend(all_extra_targets)
        forward_ops, grad_ops = GraphGradientMaker.Make(forward_ops, targets)
    else:
        grad_ops = []

    # Write Ops
    meta_graph.op.extend(forward_ops + grad_ops)

    # Write Extra Targets
    for extra_target in all_extra_targets:
        meta_graph.target.extend([extra_target])

    # Write Misc
    if len(outputs) > 0:
        GraphDef_Device(meta_graph)
        GraphDef_Opt(meta_graph)
        GraphDef_Grad(meta_graph, outputs)
        GraphDef_Phase(meta_graph, outputs)

    elif updater is not None:
        GraphDef_Device(meta_graph)
        GraphDef_Opt(meta_graph)
        GraphDef_Update(meta_graph, updater)

    # call c api to create graph
    ws.CreateGraph(meta_graph)

    # Return a lambda that runs this graph
    return lambda *args, **kwargs: \
        ws.RunGraph(meta_graph.name, (inputs, args), outputs, **kwargs)
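
The ``extend``-then-``del`` idiom in the ``givens`` branch above is worth isolating: the (possibly renamed) inputs are appended behind the originals, then the original first half is deleted. The same trick on a plain Python list:

# Self-contained sketch of the input-renaming idiom (values illustrative).
op_input = ['x', 'w']               # an op's original inputs
name_dict = {'x': 'x_sub'}          # substitutions collected from `givens`
op_input.extend([name_dict.get(name, name) for name in op_input])
del op_input[:len(op_input) // 2]   # drop the un-renamed originals
print(op_input)                     # ['x_sub', 'w']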
Example #4
def function(inputs=None, outputs=None, swaps=None, updater=None):
    """Return an executable function for a graph."""
    if inputs is None: inputs = []
    if outputs is None: outputs = []
    if not isinstance(inputs, list): inputs = [inputs]
    if not isinstance(outputs, list): outputs = [outputs]
    if len(outputs) > 0 and updater is not None:
        raise RuntimeError(
            'You can specify either outputs or updater, not both.')

    all_exprs = {}
    all_extra_targets = set()

    graph_def = pb.GraphDef()

    graph_def.name = 'Graph_' + str(ws.CURRENT_GRAPH_IDX)
    ws.CURRENT_GRAPH_IDX += 1

    # extract operators and targets from expressions
    existing_grads = False
    for output in outputs:
        graph_def.target.extend([output.name])
        if sys.version_info >= (3, 0):
            all_exprs = OrderedDict(all_exprs, **output.expressions)
        else:
            all_exprs = dict(all_exprs, **output.expressions)
        all_extra_targets = all_extra_targets.union(output.extra_targets)
        if len(output.grad_wrts) > 0: existing_grads = True
    for extra_target in all_extra_targets:
        graph_def.target.extend([extra_target])

    # Sort the operators into topological order before use
    all_exprs = sorted(all_exprs.items(), key=lambda d: d[0])
    forward_ops = copy.deepcopy([v for k, v in all_exprs])

    # handle swap
    if swaps is not None:
        name_dict = {}
        external_input_exprs = {}

        for old_tensor, new_tensor in swaps.items():
            if isinstance(new_tensor, Tensor):
                name_dict[old_tensor.name] = new_tensor._name
                if sys.version_info >= (3, 0):
                    external_input_exprs = OrderedDict(
                        external_input_exprs, **new_tensor.expressions)
                else:
                    external_input_exprs = dict(external_input_exprs,
                                                **new_tensor.expressions)
            elif isinstance(new_tensor, np.ndarray):
                # Feed the array under a fresh tensor name (name, then value)
                ws.FeedTensor(GetTensorName(), new_tensor)
        external_input_ops = [v for k, v in external_input_exprs.items()]
        for op in forward_ops:
            op.input.extend([
                name_dict[input] if input in name_dict else input
                for input in op.input
            ])
            del op.input[:len(op.input) // 2]

        forward_ops = external_input_ops + forward_ops

    # handle grads
    if existing_grads:
        targets = [output.name for output in outputs]
        forward_ops, grad_ops = GraphGradientMaker.Make(forward_ops, targets)
    else:
        grad_ops = []
    graph_def.op.extend(forward_ops + grad_ops)

    if len(outputs) > 0:
        GraphDef_Device(graph_def)
        GraphDef_Opt(graph_def)
        GraphDef_Grad(graph_def, outputs)
        GraphDef_Phase(graph_def, outputs)

    elif updater is not None:
        GraphDef_Device(graph_def)
        GraphDef_Opt(graph_def)
        GraphDef_Update(graph_def, updater)

    # call c api to create graph
    ws.CreateGraph(graph_def)

    # Return a lambda that runs this graph
    return lambda *args, **kwargs: \
        ws.RunGraph(graph_def.name, (inputs, args), outputs, **kwargs)
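
A sketch of the ``updater`` branch (hedged: ``SGDUpdater`` and its arguments are placeholders for any ``BaseUpdater`` subclass whose update targets were generated beforehand; they are not defined in this listing):

# Hypothetical updater; the real class name and constructor args may differ.
updater = SGDUpdater(base_lr=0.01, momentum=0.9)
train_step = function(updater=updater)  # outputs must be omitted here
train_step()                            # runs the update graph once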
Example #5
    def define(self, inputs=None, outputs=None, givens=None, updater=None):
        if not isinstance(inputs, list):
            if inputs is None:
                inputs = []
            else:
                inputs = [inputs]
        if not isinstance(outputs, list):
            if outputs is None:
                outputs = []
            else:
                outputs = [outputs]

        if len(outputs) > 0 and updater is not None:
            raise RuntimeError(
                'You can specify either outputs or updater, not both.')

        all_expressions = dict()
        all_extra_targets = set()

        meta_graph = self.meta_graph

        # Extract operators and targets from expressions
        existing_grads = False
        for output in outputs:
            meta_graph.output.extend([output.name])
            all_expressions.update(output.expressions)
            all_extra_targets = all_extra_targets.union(output.extra_targets)
            if output.gradient.required(): existing_grads = True

        # Sort the operators into topological order before use
        all_expressions = sorted(all_expressions.items(), key=lambda d: d[0])
        forward_ops = copy.deepcopy([v for k, v in all_expressions])

        # Handle givens
        if givens is not None:
            name_dict = {}
            external_input_expressions = {}
            # Extract new ops
            for old_tensor, new_tensor in givens.items():
                if isinstance(new_tensor, _Tensor):
                    name_dict[old_tensor.name] = new_tensor.name
                    external_input_expressions.update(new_tensor.expressions)
                else:
                    raise ValueError('Expected a Tensor, '
                                     'got {}.'.format(
                                         type(new_tensor).__name__))
                all_extra_targets = all_extra_targets.union(
                    new_tensor.extra_targets)
            external_input_expressions = sorted(
                external_input_expressions.items(), key=lambda d: d[0])
            external_input_ops = [v for k, v in external_input_expressions]
            # Update original ops
            for op in forward_ops:
                op.input.extend([
                    name_dict[input] if input in name_dict else input
                    for input in op.input
                ])
                del op.input[:len(op.input) // 2]
            # Concat them together
            forward_ops = external_input_ops + forward_ops

        # Handle grads
        if existing_grads:
            targets = [output.name for output in outputs]
            targets.extend(all_extra_targets)
            forward_ops, grad_ops, _ = \
                _gradient_maker.GraphGradientMaker \
                    .Make(forward_ops, targets)
        else:
            grad_ops = []

        # Write Ops
        meta_graph.op.extend(forward_ops + grad_ops)

        # Write Extra Targets
        for extra_target in all_extra_targets:
            meta_graph.output.extend([extra_target])

        # Write External Inputs
        for input in inputs:
            meta_graph.input.extend([input.name])

        self.inputs, self.outputs = inputs, outputs

        # Inject arguments based on global options
        if len(outputs) > 0:
            _inject_device(meta_graph)
            _inject_optimization(meta_graph)
            _inject_gradients(meta_graph, outputs)
            _inject_phase(meta_graph, outputs)

        elif updater is not None:
            _inject_device(meta_graph)
            _inject_optimization(meta_graph, opt_level=0)
            _inject_update_ops(meta_graph, updater)

        # Call c api to create graph
        self.graph_name = _workspace.CreateGraph(meta_graph)

        # Bind a lambda callback to run this graph
        self.callback = lambda *args, **kwargs: \
            _workspace.RunGraph(
                graph_name=self.graph_name,
                inputs=(inputs, args),
                outputs=outputs, **kwargs)

        # Return the self
        return self
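
A usage sketch for ``define`` (hedged: the host class is called ``Function`` here, and the graph is run through ``self.callback``; neither name is confirmed by this listing):

import numpy as np

# Hypothetical host class for the method above.
x = _Tensor('x').Variable()
y = x * 2
f = Function().define(inputs=x, outputs=y)
print(f.callback(np.ones((2, 3), dtype=np.float32)))

# Note: in this variant `givens` accepts Tensor substitutes only;
# passing an ndarray raises the ValueError above.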
Example #6
    def define(self, inputs=None, outputs=None, givens=None, updater=None):
        if not isinstance(inputs, list):
            if inputs is None:
                inputs = []
            else:
                inputs = [inputs]
        if not isinstance(outputs, list):
            if outputs is None:
                outputs = []
            else:
                outputs = [outputs]

        if len(outputs) > 0 and updater is not None:
            raise RuntimeError(
                'You can specify either outputs or updater, not both.')

        all_expressions = dict()
        all_extra_targets = set()

        meta_graph = self.meta_graph

        # Extract operators and targets from expressions
        existing_grads = False
        for output in outputs:
            meta_graph.target.extend([output.name])
            all_expressions.update(output.expressions)
            all_extra_targets = all_extra_targets.union(output.extra_targets)
            if len(output.grad_wrts) > 0: existing_grads = True

        # Sort the operators into topological order before use
        all_exprs = sorted(all_expressions.items(), key=lambda d: d[0])
        forward_ops = copy.deepcopy([v for k, v in all_exprs])

        # Handle givens
        if givens is not None:
            name_dict = {}
            external_input_expressions = {}
            # Extract new ops
            for old_tensor, new_tensor in givens.items():
                if isinstance(new_tensor, Tensor):
                    name_dict[old_tensor.name] = new_tensor.name
                    external_input_expressions.update(new_tensor.expressions)
                    # Only tensors carry extra targets; ndarrays have none
                    all_extra_targets = all_extra_targets.union(
                        new_tensor.extra_targets)
                elif isinstance(new_tensor, np.ndarray):
                    # Feed the array under a fresh tensor name (name, then value)
                    ws.FeedTensor(GetTensorName(), new_tensor)
            external_input_expressions = sorted(
                external_input_expressions.items(), key=lambda d: d[0])
            external_input_ops = [v for k, v in external_input_expressions]
            # Update original ops
            for op in forward_ops:
                op.input.extend([
                    name_dict[input] if input in name_dict else input
                    for input in op.input
                ])
                del op.input[:len(op.input) // 2]
            # Concat them together
            forward_ops = external_input_ops + forward_ops

        # Handle grads
        if existing_grads:
            targets = [output.name for output in outputs]
            targets.extend(all_extra_targets)
            forward_ops, grad_ops, _ = \
                GraphGradientMaker.Make(forward_ops, targets)
        else:
            grad_ops = []

        # Write Ops
        meta_graph.op.extend(forward_ops + grad_ops)

        # Write Extra Targets
        for extra_target in all_extra_targets:
            meta_graph.target.extend([extra_target])

        # Write Misc
        if len(outputs) > 0:
            GraphDef_Device(meta_graph)
            GraphDef_Opt(meta_graph)
            GraphDef_Grad(meta_graph, outputs)
            GraphDef_Phase(meta_graph, outputs)

        elif updater is not None:
            GraphDef_Device(meta_graph)
            GraphDef_Opt(meta_graph)
            GraphDef_Update(meta_graph, updater)

        # Call c api to create graph
        ws.CreateGraph(meta_graph)

        # Bind a lambda callback to run this graph
        self.callback = lambda *args, **kwargs: \
            ws.RunGraph(meta_graph.name, (inputs, args), outputs, **kwargs)

        # Return self
        return self
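
All six examples recover operator order by sorting the expression dict on its keys. This only yields a topological order because each key is (presumably) an index assigned when the op was created, so key order tracks creation order. A standalone sketch:

# Illustrative values: keys stand for op-creation indices.
all_expressions = {2: 'op_c', 0: 'op_a', 1: 'op_b'}
ordered_ops = [v for k, v in sorted(all_expressions.items(),
                                    key=lambda d: d[0])]
print(ordered_ops)   # ['op_a', 'op_b', 'op_c']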