Beispiel #1
0
        return False

    def __gt__(self, other):
        """Elementwise `self > other`, delegated to math_ops."""
        result = math_ops.greater(self, other)
        return result

    def __ge__(self, other):
        """Elementwise `self >= other`, delegated to math_ops."""
        result = math_ops.greater_equal(self, other)
        return result

    def __lt__(self, other):
        """Elementwise `self < other`, delegated to math_ops."""
        result = math_ops.less(self, other)
        return result

    def __le__(self, other):
        """Elementwise `self <= other`, delegated to math_ops."""
        result = math_ops.less_equal(self, other)
        return result


# Both eager Tensors and graph Tensors are wrapped by TensorNode for autodiff.
for _tensor_type in (tensor.Tensor, ops.Tensor):
    ag_core.register_node(TensorNode, _tensor_type)


def _zeros(shape, dtype):
    """Return a tensor of `dtype` zeros with the given `shape`.

    The int32 shape constant is created inside a cpu:0 device scope
    (presumably to keep the tiny constant off accelerators — see caller).
    """
    with context.device("cpu:0"):
        shape_tensor = tensor.Tensor(shape, dtype=dtypes.int32)
    zero = tensor.Tensor(0, dtype=dtype)
    return array_ops.fill(shape_tensor, zero)


def _ones(shape, dtype):
    """Return a tensor of `dtype` ones with the given `shape`."""
    shape_tensor = tensor.Tensor(shape, dtype=dtypes.int32)
    one = tensor.Tensor(1, dtype=dtype)
    return array_ops.fill(shape_tensor, one)


def _lazy_zero_tensor(zero):
Beispiel #2
0
  """VSpace needed to have ImplicitTape be a valid progenitor."""

  def zeros(self):
    """Zero element of this space: a fresh ImplicitTape instance."""
    return ImplicitTape()


class ImplicitTapeNode(ag_core.Node):
  """Node to wrap ImplicitTape in."""

  def __eq__(self, other):
    # Tapes compare by object identity, never by content.
    return other is self

  def __hash__(self):
    # Identity hash, consistent with the identity-based __eq__ above.
    return id(self)

# Register the wrapper node and vector space for ImplicitTape values.
ag_core.register_node(ImplicitTapeNode, ImplicitTape)
ag_core.register_vspace(ImplicitTapeVSpace, ImplicitTape)


# TODO(apassos) try to not do this.
class NoneVSpace(ag_core.VSpace):
  """VSpace for python None."""

  def __init__(self, _):
    # None carries no gradient data, so the space has zero dimensions.
    self.size = 0

  def zeros(self):
    # The zero element of the empty space is plain 0.
    return 0


ag_core.register_vspace(NoneVSpace, type(None))
Beispiel #3
0
        self.scalartype = complex

    def flatten(self, value, covector=False):
        """Flatten a complex array to a real vector: real half, then imaginary half."""
        real_part = anp.real(value)
        imag_part = anp.imag(value)
        if covector:
            # The imaginary half is negated for covectors.
            imag_part = -imag_part
        return anp.ravel(anp.stack([real_part, imag_part]))

    def unflatten(self, value, covector=False):
        """Inverse of flatten: rebuild the complex array from the flat real vector."""
        halves = anp.reshape(value, (2,) + self.shape)
        unit = -1j if covector else 1j
        return anp.array(halves[0] + unit * halves[1])

register_node(ArrayNode, np.ndarray)


def _array_vspace(x):
    # Complex arrays need the complex vector space; real arrays the plain one.
    return ComplexArrayVSpace(x) if np.iscomplexobj(x) else ArrayVSpace(x)


register_vspace(_array_vspace, np.ndarray)
array_types = {anp.ndarray, ArrayNode}

# Python scalars and numpy scalar types are handled like zero-dim arrays.
for type_ in (float, anp.float64, anp.float32, anp.float16):
    register_node(ArrayNode, type_)
    register_vspace(ArrayVSpace, type_)

for type_ in (complex, anp.complex64, anp.complex128):
    register_node(ArrayNode, type_)
    register_vspace(ComplexArrayVSpace, type_)

# These numpy.ndarray methods are just refs to an equivalent numpy function
nondiff_methods = ['all', 'any', 'argmax', 'argmin', 'argpartition',
    __slots__ = []

    def __getitem__(self, idx):
        """Indexing is routed through the differentiable sequence_take primitive."""
        return sequence_take(self, idx)

    def __len__(self):
        return len(self.value)

    def __add__(self, other):
        """Concatenate by unpacking `other` and extending on the right."""
        return sequence_extend_right(self, *other)

    def __radd__(self, other):
        """Concatenate by unpacking `other` and extending on the left."""
        return sequence_extend_left(self, *other)


# Both built-in sequence types are wrapped by SequenceNode for autodiff.
register_node(SequenceNode, tuple)
register_node(SequenceNode, list)


@primitive
def sequence_take(A, idx):
    """Differentiable indexing into a sequence: returns A[idx]."""
    return A[idx]


def grad_sequence_take(g, ans, vs, gvs, A, idx):
    """VJP for sequence_take: push the incoming gradient through sequence_untake."""
    return sequence_untake(g, idx, vs)


sequence_take.defvjp(grad_sequence_take)

Beispiel #5
0
  """VSpace needed to have ImplicitTape be a valid progenitor."""

  def zeros(self):
    """Return the zero element of this space — a fresh ImplicitTape."""
    return ImplicitTape()


class ImplicitTapeNode(ag_core.Node):
  """Node to wrap ImplicitTape in."""

  def __eq__(self, other):
    # Equality is object identity: a tape only equals itself.
    return other is self

  def __hash__(self):
    # Identity-based hash to stay consistent with __eq__.
    return id(self)

# Register the wrapper node and vector space for ImplicitTape values.
ag_core.register_node(ImplicitTapeNode, ImplicitTape)
ag_core.register_vspace(ImplicitTapeVSpace, ImplicitTape)


# TODO(apassos) try to not do this.
class NoneVSpace(ag_core.VSpace):
  """VSpace for python None."""

  def __init__(self, _):
    # None carries no gradient data, so the space has zero dimensions.
    self.size = 0

  def zeros(self):
    # Zero element of the empty space. Added for consistency with the sibling
    # NoneVSpace definition elsewhere in this file, which defines zeros() -> 0.
    return 0


ag_core.register_vspace(NoneVSpace, type(None))


class _TapeStack(threading.local):
Beispiel #6
0
    def flatten(self, value, covector=False):
        """Flatten a complex array to a real vector: real half, then imaginary half."""
        re = np.real(value)
        im = np.imag(value)
        if covector:
            # The imaginary half is negated for covectors.
            im = -im
        return np.ravel(np.stack([re, im]))

    def unflatten(self, value, covector=False):
        """Inverse of flatten: rebuild the complex array from the flat real vector."""
        halves = np.reshape(value, (2,) + self.shape)
        unit = -1j if covector else 1j
        return np.array(halves[0] + unit * halves[1])


register_node(ArrayNode, np.ndarray)


def _pick_vspace(x):
    # Complex data gets the complex vector space; real data the plain one.
    return ComplexArrayVSpace(x) if np.iscomplexobj(x) else ArrayVSpace(x)


register_vspace(_pick_vspace, np.ndarray)
array_types = {anp.ndarray, ArrayNode}

# Python scalars and numpy scalar types are handled like zero-dim arrays.
for type_ in (float, anp.float64, anp.float32, anp.float16):
    register_node(ArrayNode, type_)
    register_vspace(ArrayVSpace, type_)

for type_ in (complex, anp.complex64, anp.complex128):
    register_node(ArrayNode, type_)
    register_vspace(ComplexArrayVSpace, type_)

# These numpy.ndarray methods are just refs to an equivalent numpy function
nondiff_methods = [
Beispiel #7
0
    return False

  def __gt__(self, other):
    """Elementwise `self > other`, delegated to math_ops."""
    result = math_ops.greater(self, other)
    return result

  def __ge__(self, other):
    """Elementwise `self >= other`, delegated to math_ops."""
    result = math_ops.greater_equal(self, other)
    return result

  def __lt__(self, other):
    """Elementwise `self < other`, delegated to math_ops."""
    result = math_ops.less(self, other)
    return result

  def __le__(self, other):
    """Elementwise `self <= other`, delegated to math_ops."""
    result = math_ops.less_equal(self, other)
    return result


# Both eager Tensors and graph Tensors are wrapped by TensorNode for autodiff.
for _tensor_cls in (tensor.Tensor, ops.Tensor):
  ag_core.register_node(TensorNode, _tensor_cls)


def _zeros(shape, dtype):
  """Return a tensor of `dtype` zeros with the given `shape`.

  The int32 shape constant is created inside a cpu:0 device scope
  (presumably to keep the tiny constant off accelerators — see caller).
  """
  with context.device("cpu:0"):
    shape_t = tensor.Tensor(shape, dtype=dtypes.int32)
  zero = tensor.Tensor(0, dtype=dtype)
  return array_ops.fill(shape_t, zero)


def _ones(shape, dtype):
  """Return a tensor of `dtype` ones with the given `shape`."""
  shape_t = tensor.Tensor(shape, dtype=dtypes.int32)
  one = tensor.Tensor(1, dtype=dtype)
  return array_ops.fill(shape_t, one)


def _lazy_zero_tensor(zero):
Beispiel #8
0
from __future__ import absolute_import
from autograd.core import (primitive, Node, VSpace, register_node, vspace,
                           register_vspace, SparseObject)
from builtins import zip
from future.utils import iteritems
from functools import partial
import autograd.numpy as np

class SequenceNode(Node):
    """Node wrapping a list or tuple value for autodiff."""

    __slots__ = []

    def __getitem__(self, idx):
        # Indexing goes through the differentiable sequence_take primitive.
        return sequence_take(self, idx)

    def __len__(self):
        return len(self.value)

    def __add__(self, other):
        # Concatenation: unpack `other` and extend on the right.
        return sequence_extend_right(self, *other)

    def __radd__(self, other):
        # Reflected concatenation: unpack `other` and extend on the left.
        return sequence_extend_left(self, *other)

# Both built-in sequence types are wrapped by SequenceNode for autodiff.
register_node(SequenceNode, tuple)
register_node(SequenceNode, list)

@primitive
def sequence_take(A, idx):
    """Differentiable indexing into a sequence: returns A[idx]."""
    return A[idx]
def grad_sequence_take(g, ans, vs, gvs, A, idx):
    """VJP for sequence_take: push the incoming gradient through sequence_untake."""
    return sequence_untake(g, idx, vs)
sequence_take.defvjp(grad_sequence_take)

@primitive
def sequence_extend_right(seq, *elts):
    """Return `seq` with `elts` appended, preserving the sequence's type."""
    tail = type(seq)(elts)
    return seq + tail
def grad_sequence_extend_right(argnum, g, ans, vs, gvs, args, kwargs):
    """VJP for sequence_extend_right, one gradient per positional argument."""
    seq, elts = args[0], args[1:]
    # argnum 0 is the original sequence (first len(seq) slots of g); each
    # appended element maps to one trailing slot of g.
    # NOTE(review): this is the last visible line of the chunk — confirm the
    # function ends here in the full file.
    return g[:len(seq)] if argnum == 0 else g[len(seq) + argnum - 1]