Example no. 1
    def get_gradient_compile_args(self,
                                  inputs,
                                  outputs,
                                  graph,
                                  wrt=None,
                                  reduction=None):
        """
        Helper function: given the symbolic inputs and outputs, as well as
        a theano graph and wrt/reduction info, return the appropriate arguments
        for theano.function to compile a gradient.
        """
        wrt = utils.as_seq(wrt)

        if reduction in ['sum', 'max', 'mean', 'min', 'prod', 'std', 'var']:
            reduction = getattr(theano.tensor, reduction)

        if callable(reduction):
            if 'numpy' in reduction.__module__:
                reduction = getattr(theano.tensor, reduction.__name__)
            outputs = [reduction(o) if o.ndim > 0 else o for o in outputs]

        if np.any([o.ndim != 0 for o in outputs]):
            raise TypeError('Gradient requires either scalar outputs or a '
                            'reduction that returns a scalar.')

        # get wrt variables. If none were specified, use inputs.
        if len(wrt) == 0:
            wrt = [i for i in inputs]
        else:
            wrt = [graph[self.get_symbolic(w)] for w in wrt]

        grads = utils.flatten([T.grad(o, wrt=wrt) for o in outputs])

        return dict(inputs=inputs, outputs=utils.as_seq(grads, tuple))
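The dict returned above is meant to be splatted into theano.function. A minimal standalone sketch of that flow (illustrative only, not library code), building comparable compile arguments by hand for a single vector input:

import theano
import theano.tensor as T

x = T.dvector('x')
cost = T.sum(x ** 2)                        # a 'sum' reduction yields a scalar output
grads = T.grad(cost, wrt=[x])               # one gradient expression per wrt variable

compile_args = dict(inputs=[x], outputs=grads)
grad_fn = theano.function(**compile_args)   # same kwargs shape as the helper returns

print(grad_fn([1.0, 2.0, 3.0]))             # -> [array([2., 4., 6.])]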
Example no. 2
    def get_theano_vars(self, inputs=None, outputs=None):
        """
        Returns a tuple of (inputs, outputs, graph) corresponding to the
        Theano version of the pyfn.
        """
        sym_inputs = tuple(self.get_symbolic(i)
                           for i in utils.as_seq(inputs))

        sym_outputs = tuple(self.get_symbolic(o)
                            for o in utils.as_seq(outputs))

        # get symbolic inputs corresponding to the (possibly shared) inputs in
        # sym_inputs. This dict maps each variable to a fresh non-shared
        # variable of the same type.
        s_memo = OrderedDict((var, var.type())
                             for var in utils.flatten(sym_inputs))
        theano_inputs = tuple(s_memo.values())

        # get new graph, replacing shared inputs with symbolic ones
        # graph is a dict mapping "old" variables to "new" ones, where "old"
        # is the chain including shared variables, and "new" is the chain
        # with the non-shared replacements.
        graph = theano.gof.graph.clone_get_equiv(
            inputs=theano.gof.graph.inputs(sym_outputs),
            outputs=sym_outputs,
            memo=s_memo.copy())

        # get symbolic outputs
        theano_outputs = tuple([graph[o] for o in sym_outputs])

        return theano_inputs, theano_outputs, graph
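The shared-variable replacement can be seen in isolation: clone_get_equiv is seeded with a memo that maps each shared input to a fresh non-shared variable, and the cloned graph is then wired to those stand-ins. A hedged standalone sketch of the same pattern (illustrative, not library code):

from collections import OrderedDict
import numpy as np
import theano
import theano.tensor as T

w = theano.shared(np.array([1.0, 2.0]), name='w')    # shared input
y = (w ** 2).sum()                                    # output depending on it

# map the shared variable to a fresh non-shared variable of the same type
s_memo = OrderedDict([(w, w.type())])

graph = theano.gof.graph.clone_get_equiv(
    inputs=theano.gof.graph.inputs([y]),
    outputs=[y],
    memo=s_memo.copy())

new_input = s_memo[w]          # symbolic stand-in for the shared variable
new_output = graph[y]          # cloned output wired to the stand-in

f = theano.function([new_input], new_output)
print(f([3.0, 4.0]))           # -> 25.0, regardless of w's stored value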
Example no. 3
    def get_gradient_compile_args(self,
                                  inputs,
                                  outputs,
                                  wrt=None,
                                  reduction=None):
        """
        Helper function: given the symbolic inputs and outputs, as well as
        wrt/reduction info, return the appropriate arguments for
        theano.function to compile a gradient.
        """
        wrt = utils.as_seq(wrt)

        if reduction in ['sum', 'max', 'mean', 'min', 'prod', 'std', 'var']:
            reduction = getattr(theano.tensor, reduction)

        if callable(reduction):
            if 'numpy' in reduction.__module__:
                reduction = getattr(theano.tensor, reduction.__name__)
            outputs = [reduction(o) if o.ndim > 0 else o for o in outputs]

        if any([o.ndim != 0 for o in outputs]):
            raise TypeError('Gradient requires either scalar outputs or a '
                            'reduction that returns a scalar.')

        # get wrt variables. If none were specified, use inputs.
        if len(wrt) == 0:
            wrt = [i for i in inputs]
        else:
            wrt = [self.get_symbolic(w) for w in wrt]

        grads = utils.flatten([T.grad(o, wrt=wrt) for o in outputs])

        return dict(inputs=inputs, outputs=utils.as_seq(grads, tuple))
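The reduction-normalisation step above works because NumPy and Theano use the same names for their reductions, so a NumPy callable can be swapped for its Theano counterpart via __name__. A small standalone illustration (not library code):

import numpy as np
import theano.tensor as T

reduction = np.sum                                # caller passed a NumPy callable
if 'numpy' in reduction.__module__:
    reduction = getattr(T, reduction.__name__)    # np.sum -> T.sum

x = T.dmatrix('x')
out = reduction(x)                                # scalar Theano expression
print(out.ndim)                                   # -> 0, so T.grad can be applied to it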
Example no. 4
    def shadow(self, args):
        """
        Helper function for `_shadow_inner` that calls it on a flattened
        version of its argument, then restores the original nesting.
        """
        shadow_vars = [self._shadow_inner(x) for x in utils.flatten(args)]
        new_args = utils.unflatten(args, shadow_vars)
        self.context.shadowed_containers[id(new_args)] = args
        return new_args
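shadow relies on a flatten/map/unflatten round-trip: the nested argument is flattened to its leaves, each leaf is shadowed, and the original nesting is rebuilt around the results. The sketch below illustrates that round-trip with hypothetical stand-ins for utils.flatten and utils.unflatten, not the library's own helpers:

def flatten(obj):
    # hypothetical stand-in for utils.flatten: collect leaves of nested lists/tuples
    if isinstance(obj, (list, tuple)):
        return [leaf for item in obj for leaf in flatten(item)]
    return [obj]

def unflatten(template, flat):
    # hypothetical stand-in for utils.unflatten: rebuild template's nesting,
    # consuming the flat list of replacement leaves
    if isinstance(template, (list, tuple)):
        return type(template)(unflatten(item, flat) for item in template)
    return flat.pop(0)

args = (1, [2, 3], (4,))
shadow_vars = [x * 10 for x in flatten(args)]     # stand-in for _shadow_inner
new_args = unflatten(args, shadow_vars)
print(new_args)                                   # -> (10, [20, 30], (40,))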
Example no. 5
    def handle_escape(x):
        """
        Handles escaping variables: shared variables are replaced by their
        stored values, other symbolic variables are evaluated, and anything
        else is passed through unchanged.
        """
        def escape(x):
            if isinstance(x, theano.tensor.sharedvar.SharedVariable):
                return x.get_value()
            elif utils.isvar(x):
                try:
                    return x.eval()
                except Exception:
                    raise ValueError('Could not escape {0}'.format(x))
            else:
                return x
        return utils.unflatten(x, [escape(i) for i in utils.flatten(x)])
Example no. 6
    def get_hessian_vector_compile_args(self,
                                        inputs,
                                        outputs,
                                        wrt=None,
                                        reduction=None):
        """
        Helper function: given the symbolic inputs and outputs, as well as
        wrt/reduction info, return the appropriate arguments for
        theano.function to compile a Hessian-vector product.
        """
        wrt = utils.as_seq(wrt)

        if reduction in ['sum', 'max', 'mean', 'min', 'prod', 'std', 'var']:
            reduction = getattr(theano.tensor, reduction)

        if callable(reduction):
            if 'numpy' in reduction.__module__:
                reduction = getattr(theano.tensor, reduction.__name__)
            outputs = [reduction(o) if o.ndim > 0 else o for o in outputs]

        if any([o.ndim != 0 for o in outputs]):
            raise TypeError('Hessian-vector product requires either scalar '
                            'outputs or a reduction that returns a scalar.')

        # get wrt variables. If none were specified, use inputs.
        if len(wrt) == 0:
            wrt = [i for i in inputs]
        else:
            wrt = [self.get_symbolic(w) for w in wrt]

        grads = utils.flatten([T.grad(o, wrt=wrt) for o in outputs])

        sym_vectors = tuple(T.TensorType(
            dtype=w.dtype, broadcastable=[False] * w.ndim)()
            for w in wrt)
        hessian_vectors = utils.as_seq(T.Rop(grads, wrt, sym_vectors), tuple)

        return dict(inputs=inputs + sym_vectors, outputs=hessian_vectors)
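The arguments returned above describe a function that takes the original inputs plus one extra vector per wrt variable and returns the Hessian-vector product, obtained by applying T.Rop to the gradient. A hedged standalone sketch of the same construction and its compilation (illustrative only, not library code):

import theano
import theano.tensor as T

x = T.dvector('x')
cost = T.sum(x ** 2)                        # scalar output; Hessian is 2*I
grad = T.grad(cost, wrt=[x])                # list with one gradient expression

# one extra symbolic vector per wrt variable, matching dtype and ndim
v = T.TensorType(dtype=x.dtype, broadcastable=[False] * x.ndim)()
hvp = T.Rop(grad, [x], [v])                 # R-operator of the gradient gives H.v

hvp_fn = theano.function([x, v], hvp)
print(hvp_fn([1.0, 2.0], [1.0, 0.0]))       # -> [array([2., 0.])]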
Example no. 7
    def handle_escaped_call(fn, *args, **kwargs):
        # escape every positional and keyword argument, preserving the
        # original nesting, then call fn on the concrete values.
        esc_args = utils.unflatten(
            args,
            [TheanoTransformer.handle_escape(a) for a in utils.flatten(args)])
        esc_kwargs = utils.unflatten(
            kwargs,
            [TheanoTransformer.handle_escape(a) for a in utils.flatten(kwargs)])
        return fn(*esc_args, **esc_kwargs)
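In effect, escaping converts each argument to a concrete value before the wrapped function is called: shared variables yield their stored values, evaluable symbolic expressions are evaluated, and ordinary Python objects pass through. A short sketch of those three cases in plain Theano (not using the library's helpers):

import numpy as np
import theano
import theano.tensor as T

w = theano.shared(np.array([1.0, 2.0]), name='w')

print(w.get_value())                  # shared variable -> stored value: [1. 2.]
print((T.constant(3.0) + 1).eval())   # evaluable expression -> 4.0
print(42)                             # ordinary Python object -> unchanged

# an expression with a free input cannot be escaped this way
x = T.dvector('x')
try:
    (x + 1).eval()                    # fails: x has no value bound to it
except Exception:
    print('could not escape x + 1')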