Code Example #1
File: opt.py Project: DeepLearningIndia/Theano
def local_gpualloc(node):
    replace = False
    if node.op == tensor.alloc:
        if node.inputs[0].owner and node.inputs[0].owner.op == host_from_gpu:
            replace = True
        elif all([c != 'output' and c.op == gpu_from_host
                  for c, idx in node.outputs[0].clients]):
            replace = True
        elif all([c != 'output' and c.op == tensor.join and
                  all([i.owner and i.owner.op in [host_from_gpu, tensor.alloc]
                       for i in c.inputs[1:]])
                  for c, idx in node.outputs[0].clients]):
            replace = True
    if replace:
        val = node.inputs[0]
        shp = node.inputs[1:]
        old_out = node.outputs[0]
        val2 = tensor.shape_padleft(val, len(shp) - val.ndim)
        new_out = host_from_gpu(gpu_alloc(val, *shp))
        if new_out.type != old_out.type:
            assert new_out.type.ndim == old_out.type.ndim
            assert new_out.type.dtype == old_out.type.dtype
            for b_old, b_new in zip(old_out.type.broadcastable,
                                    new_out.type.broadcastable):
                assert b_new or (not b_old)
            new_out = tensor.patternbroadcast(new_out, old_out.broadcastable)

        return [new_out]
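
The rewrite above restores the old output's broadcast pattern with tensor.patternbroadcast(variable, pattern), which takes the variable and the target pattern as two separate arguments. A minimal standalone sketch of that call (plain Theano, not taken from the project above):

import theano.tensor as tensor

x = tensor.matrix('x')
# Re-assert that the first dimension is broadcastable (i.e. has length 1 at
# run time); dtype and ndim are unchanged, only the broadcast pattern moves.
x_row = tensor.patternbroadcast(x, (True, False))
assert x_row.broadcastable == (True, False)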
Code Example #2
File: test_ifelse.py Project: aboSamoor/Theano
    def test_multiple_out_grad(self):
        # Tests that we can compute the gradients through lazy if
        x1 = tensor.vector('x1')
        x2 = tensor.vector('x2')
        y1 = tensor.vector('y1')
        y2 = tensor.vector('y2')
        c = tensor.iscalar('c')
        z = ifelse(c, (x1, x2), (y1, y2))
        grads = tensor.grad(z[0].sum() + z[1].sum(),
                            [x1, x2, y1, y2])

        f = theano.function([c, x1, x2, y1, y2], grads)
        rng = numpy.random.RandomState(utt.fetch_seed())

        lens = [rng.randint(200) for i in range(4)]
        values = [numpy.asarray(rng.uniform(size=(l,)), theano.config.floatX)
                  for l in lens]
        outs_1 = f(1, *values)
        assert all([x.shape[0] == y for x, y in zip(outs_1, lens)])
        assert numpy.all(outs_1[0] == 1.)
        assert numpy.all(outs_1[1] == 1.)
        assert numpy.all(outs_1[2] == 0.)
        assert numpy.all(outs_1[3] == 0.)

        outs_0 = f(0, *values)
        assert all([x.shape[0] == y for x, y in zip(outs_0, lens)])
        assert numpy.all(outs_0[0] == 0.)
        assert numpy.all(outs_0[1] == 0.)
        assert numpy.all(outs_0[2] == 1.)
        assert numpy.all(outs_0[3] == 1.)
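
The test above pushes gradients through the lazy ifelse. A smaller hedged sketch of the same API (assuming the usual theano.ifelse.ifelse import and the default floatX):

import numpy
import theano
import theano.tensor as T
from theano.ifelse import ifelse

c = T.iscalar('c')
a = T.vector('a')
b = T.vector('b')
out = ifelse(c, a, b).sum()
ga, gb = T.grad(out, [a, b])
f = theano.function([c, a, b], [ga, gb])
ones = numpy.ones(3, dtype=theano.config.floatX)
zeros = numpy.zeros(3, dtype=theano.config.floatX)
# With c=1 the gradient flows only through the first branch:
# d(out)/da is all ones, d(out)/db is all zeros.
print(f(1, ones, zeros))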
Code Example #3
File: opt.py Project: jlowin/Theano
def local_gpuaalloc2(node):
    """
    Join(axis, Alloc, Alloc, ...) -> Join(axis, GpuAlloc, Alloc, ...)

    Moves an alloc that is an input to join to the gpu.
    """
    if isinstance(node.op, tensor.Alloc) and all(
        c != "output"
        and c.op == tensor.join
        and all(i.owner and i.owner.op in [host_from_gpu, tensor.alloc] for i in c.inputs[1:])
        for c, idx in node.outputs[0].clients
    ):
        return [host_from_gpu(gpu_alloc(*node.inputs))]
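
Rewrites like this one are hooked into Theano's optimizer as local optimizers. A hedged sketch of the registration pattern (the decorator import and the tracked op are assumptions, not copied from the opt.py above):

from theano import tensor
from theano.gof import local_optimizer

@local_optimizer([tensor.alloc])
def local_demo_alloc(node):
    # A local optimizer returns a list of replacement outputs to rewrite
    # the node, or False/None to leave it untouched.
    return False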
Code Example #4
File: test_cuda_ndarray.py Project: 317070/Theano
def test_zeros_basic():
    for shp in [(3,4,5), (300,), (), (0,7)]:
        _a = cuda_ndarray.CudaNdarray.zeros(shp)
        _n = numpy.zeros(shp, dtype="float32")
        assert numpy.allclose(numpy.asarray(_a), _n)
        assert _a.shape == _n.shape
        assert all(_a._strides == numpy.asarray(_n.strides)/4)

    # TODO: The following don't have the same strides!
    #      This should be fixed with the new GpuNdArray.
    for shp in [(3,0), (4,1,5)]:
        _a = cuda_ndarray.CudaNdarray.zeros(shp)
        _n = numpy.zeros(shp, dtype="float32")
        assert numpy.allclose(numpy.asarray(_a), _n)
        assert _a.shape == _n.shape


    try:
        _n = numpy.zeros()
    except TypeError:
        pass
    else:
        raise Exception("An error was expected!")
    try:
        _a = cuda_ndarray.CudaNdarray.zeros()
    except TypeError:
        pass
    else:
        raise Exception("An error was expected!")
Code Example #5
def guess_n_streams(size, warn=True):
    """
    Return a guess at a good number of streams.

    :param warn: If True, warn when a guess cannot be made (in which case
    we return 60 * 256).
    """
    # TODO: a smart way of choosing the number of streams, see #612.
    # Note that this code was moved out of `MRG_RandomStreams` so that it can
    # be easily accessed from tests, where we want to disable the warning.
    if (isinstance(size, (tuple, list)) and
        all([isinstance(i, int) for i in size])):
        # We can make a guess.
        r = 1
        for s in size:
            r *= s
        if r > 6:
            r = r // 6 # chosen as fastest for rbm_benchmark

        # The purpose of sampling from many streams is to be able to use
        # the GPU to its full capacity.  It just wastes RAM and stream-initialization time to
        # allocate more streams than necessary for the GPU.
        # XXX: This number is chosen to be good for 280 and 480 architectures,
        #      Better would be to use pycuda to query the number of
        #      processors on the GPU device,
        #      rather than guessing 60.
        return min(r, 60 * 256)
    else:
        if warn:
            warnings.warn((
                    "MRG_RandomStreams Can't determine #streams from "
                    "size (%s), guessing 60*256") % str(size),
                    stacklevel=3)
        return 60 * 256
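
A hedged usage sketch of the heuristic above (the import path is assumed from the rng_mrg.py file name used elsewhere on this page; the cap is 60 * 256 in this version and 30 * 256 in Code Example #7):

from theano.sandbox.rng_mrg import guess_n_streams

print(guess_n_streams((1000, 1000)))      # large product, capped at 60 * 256 = 15360
print(guess_n_streams((12,)))             # 12 // 6 = 2 streams
print(guess_n_streams(None, warn=False))  # size unknown, falls back to 60 * 256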
Code Example #6
File: scan_utils.py Project: honghaizhu/Theano
 def is_updates(elem):
     if isinstance(elem, dict):
         return True
     # Dictionaries can be given as lists of tuples
     if isinstance(elem, (list, tuple)) and all([isinstance(x, (list, tuple)) and len(x) == 2 for x in elem]):
         return True
     return False
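
is_updates accepts either a dict or a list of (variable, new_value) pairs, the same two forms theano.function(updates=...) takes. A minimal hedged illustration:

import theano

s = theano.shared(0)
updates_as_dict = {s: s + 1}        # recognized: a plain dict
updates_as_pairs = [(s, s + 1)]     # recognized: a list of 2-tuples
f = theano.function([], s, updates=updates_as_pairs)
f()
f()
print(s.get_value())                # 2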
Code Example #7
File: rng_mrg.py Project: NicolasBouchard/Theano
def guess_n_streams(size, warn=True):
    """
    Return a guess at a good number of streams.

    :param warn: If True, warn when a guess cannot be made (in which case
    we return 30 * 256).
    """
    # TODO: a smart way of choosing the number of streams, see #612.
    # Note that this code was moved out of `MRG_RandomStreams` so that it can
    # be easily accessed from tests, where we want to disable the warning.
    if (isinstance(size, (tuple, list)) and
        all([isinstance(i, int) for i in size])):
        # We can make a guess.
        r = 1
        for s in size:
            r *= s
        if r > 6:
            r = r/6 # chosen as fastest for rbm_benchmark
        return r
    else:
        if warn:
            warnings.warn((
                    "MRG_RandomStreams Can't determine #streams from "
                    "size (%s), guessing 30*256") % str(size),
                    stacklevel=3)
        return 30 * 256
Code Example #8
File: scan_utils.py Project: shawakaze/Theano
 def is_outputs(elem):
     if (isinstance(elem, (list, tuple)) and
         all([isinstance(x, theano.Variable) for x in elem])):
         return True
     if isinstance(elem, theano.Variable):
         return True
     return False
Code Example #9
File: test_utils.py Project: flashus/Theano
def test_give_variables_names_small():
    x = theano.tensor.matrix('x')
    y = theano.tensor.dot(x, x)
    fgraph = theano.FunctionGraph((x,), (y,))
    give_variables_names(fgraph.variables)
    assert all(var.name for var in fgraph.variables)
    assert unique([var.name for var in fgraph.variables])
Code Example #10
File: rng_curand.py Project: Dimitris0mg/Theano
    def normal(self, size=None, avg=0.0, std=1.0, ndim=None,
            dtype=config.floatX):
        """
        Return symbolic tensor of normally-distributed numbers.

        :param size: Can be a list of integers or a Theano variable (ex: the
            shape of another Theano Variable)
        """
        if isinstance(size, tuple):
            msg = "size must be a tuple of int or a Theano variable"
            assert all([isinstance(i, int) or isinstance(i, Variable)
                for i in size]), msg
        else:
            msg = "size must be a tuple of int or a Theano variable"
            assert isinstance(size, Variable) and size.ndim == 1, msg
        generator = theano.shared(False)  # makes a generic
        s_size = theano.tensor.as_tensor_variable(size)
        u = CURAND_Normal.new_auto_update(generator, ndim, dtype, s_size,
                self.next_seed())
        self.state_updates.append(u.update)
        rval = u * std + avg
        if u.type.broadcastable != rval.type.broadcastable:
            raise NotImplementedError(
                'Increase the size to match the broadcasting pattern of `low`'
                'and `high` arguments'
            )
        return  rval
Code Example #11
File: test_mpi.py Project: Jerryzcn/Theano
def test_mpi_tag_ordering():
    x = recv((2, 2), "float32", 1, 12)
    y = recv((2, 2), "float32", 1, 11)
    z = recv((2, 2), "float32", 1, 13)
    f = theano.function([], [x, y, z], mode=mpi_mode)
    nodes = f.maker.linker.make_all()[-1]

    assert all(node.op.tag == tag for node, tag in zip(nodes, (11, 12, 13, 11, 12, 13)))
Code Example #12
File: test_utils.py Project: flashus/Theano
def test_give_variables_names():
    x = theano.tensor.matrix('x')
    y = x + 1
    z = theano.tensor.dot(x, y)
    variables = (x, y, z)
    give_variables_names(variables)
    assert all(var.name for var in variables)
    assert unique([var.name for var in variables])
Code Example #13
File: scan_utils.py Project: thiboeri/Theano
 def is_updates(elem):
     if isinstance(elem, dict):
         return True
     # Dictionaries can be given as lists of tuples
     if (isinstance(elem, (list, tuple)) and all(
         [isinstance(x, (list, tuple)) and len(x) == 2 for x in elem])):
         return True
     return False
Code Example #14
def test_give_variables_names():
    x = theano.tensor.matrix('x')
    y = x + 1
    z = theano.tensor.dot(x, y)
    variables = (x, y, z)
    give_variables_names(variables)
    assert all(var.name for var in variables)
    assert unique([var.name for var in variables])
Code Example #15
def test_mpi_tag_ordering():
    x = recv((2, 2), 'float32', 1, 12)
    y = recv((2, 2), 'float32', 1, 11)
    z = recv((2, 2), 'float32', 1, 13)
    f = theano.function([], [x, y, z], mode=mpi_mode)
    nodes = f.maker.linker.make_all()[-1]

    assert all(node.op.tag == tag
               for node, tag in zip(nodes, (11, 12, 13, 11, 12, 13)))
Code Example #16
 def __setup_node__(self, node):
     # sets up node so it belongs to this fgraph
     if hasattr(node, 'fgraph') and node.fgraph is not self:
         raise Exception("%s is already owned by another fgraph" % node)
     if (hasattr(node.op, 'view_map') and
         not all([isinstance(view, (list, tuple))
                  for view in node.op.view_map.values()])):
         raise Exception("Op '%s' have a bad view map '%s',"
                         " the values must be tuples or lists." % (
                             str(node.op), str(node.op.view_map)))
     if (hasattr(node.op, 'destroy_map') and
         not all([isinstance(destroy, (list, tuple))
                  for destroy in node.op.destroy_map.values()])):
         raise Exception("Op '%s' have a bad destroy map '%s',"
                         " the values must be tuples or lists." % (
                             str(node.op), str(node.op.destroy_map)))
     node.fgraph = self
     node.deps = {}
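
__setup_node__ only validates the shape of these annotations: view_map and destroy_map are dicts mapping an output index to a list (or tuple) of input indices. A hedged, purely illustrative sketch (both Op classes below are hypothetical and never instantiated):

import theano

class ViewFirstInput(theano.Op):       # hypothetical Op, for illustration only
    view_map = {0: [0]}                # output 0 is a view of input 0

class OverwriteFirstInput(theano.Op):  # hypothetical
    destroy_map = {0: [0]}             # output 0 destroys (reuses) input 0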
Code Example #17
File: opt.py Project: amishtal/Theano
    def apply_node_merge(self, env):
        # we clear the dicts because the Constants signatures are not necessarily hashable
        # and it's more efficient to give them an integer like the other Variables

        nodes_seen = {}

        for node_idx, node in enumerate(_list_of_nodes(env)):
            #
            # these asserts ensure that the env has set the clients field
            # properly; the clients should at least contain `node` itself!
            #
            if node.inputs:
                assert len(node.inputs[0].clients) > 0
                assert (node, 0) in node.inputs[0].clients
                merge_candidates = [(nodes_seen[c], c)
                                    for (c, i) in node.inputs[0].clients
                                    if c in nodes_seen]
            else:
                merge_candidates = []
            merge_candidates.sort()
            nodes_seen[node] = node_idx
            #print 'NODE', node, merge_candidates, node.inputs[0].clients
            for candidate_idx, candidate in merge_candidates:
                if len(node.inputs) != len(candidate.inputs):
                    continue
                inputs_match = all(
                    node_in is cand_in
                    for node_in, cand_in in zip(node.inputs, candidate.inputs))
                if inputs_match and node.op == candidate.op:
                    assert node is not candidate
                    #
                    #transfer clients from node to candidate
                    #
                    success = True
                    assert len(node.outputs) == len(candidate.outputs)
                    pairs = zip(node.outputs, candidate.outputs)

                    #transfer names
                    for node_output, cand_output in pairs:
                        #clobber old name with new one
                        #it's arbitrary... one of the names has to go
                        if node_output.name:
                            cand_output.name = node_output.name
                    try:
                        env.replace_all_validate(pairs, reason="Merge")
                    except InconsistencyError, e:
                        success = False

                    if success:
                        #break out of the candidate loop
                        break
                    else:
                        #try the next candidate
                        pass
Code Example #18
File: test_mpi.py Project: Jerryzcn/Theano
def test_mpi_schedule():
    x = theano.tensor.matrix("x")
    y = send(x, 1, 11)
    z = x + x
    waitnode = y.owner
    sendnode = y.owner.inputs[0].owner
    addnode = z.owner

    f = theano.function([x], [y, z], mode=mpi_mode)
    nodes = f.maker.linker.make_all()[-1]
    optypes = [MPISend, theano.tensor.Elemwise, MPISendWait]
    assert all(isinstance(node.op, optype) for node, optype in zip(nodes, optypes))
Code Example #19
File: test_elemwise.py Project: jaberg/Theano
    def with_linker(self, linker):
        for xsh, shuffle, zsh in [((2, 3), (1, 'x', 0), (3, 1, 2)),
                                  ((1, 2, 3), (1, 2), (2, 3)),
                                  ((1, 2, 1, 3), (1, 3), (2, 3)),
                                  ((2, 3, 4), (2, 1, 0), (4, 3, 2)),
                                  ((2, 3, 4), ('x', 2, 1, 0, 'x'),
                                   (1, 4, 3, 2, 1)),
                                  ((1, 4, 3, 2, 1), (3, 2, 1), (2, 3, 4)),
                                  ((1, 1, 4), (1, 2), (1, 4)),
                                  ((1, 1, 1), (), ()),
                                  ((1,), ('x', 'x'), (1, 1))]:
            ib = [(entry == 1) for entry in xsh]
            x = TensorType('float64', ib)('x')
            e = DimShuffle(ib, shuffle)(x)
            f = copy(linker).accept(FunctionGraph([x], [e])).make_function()
            assert f(numpy.ones(xsh)).shape == zsh
            #test that DimShuffle.infer_shape work correctly
            x = TensorType('float64', ib)('x')
            e = DimShuffle(ib, shuffle)(x)
            f = copy(linker).accept(FunctionGraph([x],
                                                  [e.shape])).make_function()
            assert all(f(numpy.ones(xsh))) == all(zsh)

        # Test when we drop an axis that is not broadcastable
        ib = [False, True, False]
        x = TensorType('float64', ib)('x')
        self.assertRaises(ValueError, DimShuffle, ib, shuffle)

        # Test when we drop an axis that doesn't have shape 1
        ib = [True, True, False]
        x = TensorType('float64', ib)('x')
        e = DimShuffle(ib, (1, 2))(x)
        f = copy(linker).accept(FunctionGraph([x], [e.shape])).make_function()
        self.assertRaises(TypeError, f, numpy.ones((2, 1, 4)))

        # Test that we can't take a dimension multiple times
        xsh, shuffle, zsh = ((1, 1, 4), (0, 1, 2, 0), (1, 4))
        ib = [False, True, False]
        x = TensorType('float64', ib)('x')
        self.assertRaises(ValueError, DimShuffle, ib, shuffle)
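
The shuffle patterns in the test above map directly onto the user-facing dimshuffle method; for example (1, 'x', 0) puts axis 1 first, inserts a new broadcastable axis, then appends axis 0. A small hedged sketch:

import numpy
import theano
import theano.tensor as T

x = T.matrix('x')
f = theano.function([x], x.dimshuffle(1, 'x', 0))
out = f(numpy.ones((2, 3), dtype=theano.config.floatX))
print(out.shape)   # (3, 1, 2), matching the first case in the loop above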
Code Example #20
        def test_specify_shape_inplace(self):
            # test that specify_shape doesn't break inserting inplace ops

            dtype = self.dtype
            if dtype is None:
                dtype = theano.config.floatX

            rng = numpy.random.RandomState(utt.fetch_seed())
            a = numpy.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)
            a = self.cast_value(a)
            a_shared = self.shared_constructor(a)
            b = numpy.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)
            b = self.cast_value(b)
            b_shared = self.shared_constructor(b)
            s = numpy.zeros((40, 40), dtype=dtype)
            s = self.cast_value(s)
            s_shared = self.shared_constructor(s)
            f = theano.function([], updates={s_shared: theano.dot(a_shared, b_shared) + s_shared})
            topo = f.maker.env.toposort()
            f()
            # [Gemm{inplace}(<TensorType(float64, matrix)>, 0.01, <TensorType(float64, matrix)>, <TensorType(float64, matrix)>, 2e-06)]
            if theano.config.mode != "FAST_COMPILE":
                assert sum([node.op.__class__.__name__ in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo]) == 1
                assert all(
                    node.op == tensor.blas.gemm_inplace for node in topo if isinstance(node.op, tensor.blas.Gemm)
                )
                assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "GpuGemm")
            # There is no inplace gemm for sparse
            # assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "StructuredDot")
            s_shared_specify = tensor.specify_shape(s_shared, s_shared.get_value(borrow=True).shape)

            # now test with the specify shape op in the output
            f = theano.function(
                [], s_shared.shape, updates={s_shared: theano.dot(a_shared, b_shared) + s_shared_specify}
            )
            topo = f.maker.env.toposort()
            shp = f()
            assert numpy.all(shp == (40, 40))
            if theano.config.mode != "FAST_COMPILE":
                assert sum([node.op.__class__.__name__ in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo]) == 1
                assert all(
                    node.op == tensor.blas.gemm_inplace for node in topo if isinstance(node.op, tensor.blas.Gemm)
                )
                assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "GpuGemm")
            # now test with the specify shape op in the inputs and outputs
            a_shared = tensor.specify_shape(a_shared, a_shared.get_value(borrow=True).shape)
            b_shared = tensor.specify_shape(b_shared, b_shared.get_value(borrow=True).shape)

            f = theano.function(
                [], s_shared.shape, updates={s_shared: theano.dot(a_shared, b_shared) + s_shared_specify}
            )
            topo = f.maker.env.toposort()
            shp = f()
            assert numpy.all(shp == (40, 40))
            if theano.config.mode != "FAST_COMPILE":
                assert sum([node.op.__class__.__name__ in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo]) == 1
                assert all(
                    node.op == tensor.blas.gemm_inplace for node in topo if isinstance(node.op, tensor.blas.Gemm)
                )
                assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "GpuGemm")
Code Example #21
    def with_linker(self, linker):
        for xsh, shuffle, zsh in [((2, 3), (1, 'x', 0), (3, 1, 2)),
                                  ((1, 2, 3), (1, 2), (2, 3)),
                                  ((1, 2, 1, 3), (1, 3), (2, 3)),
                                  ((2, 3, 4), (2, 1, 0), (4, 3, 2)),
                                  ((2, 3, 4), ('x', 2, 1, 0, 'x'), (1, 4, 3, 2,
                                                                    1)),
                                  ((1, 4, 3, 2, 1), (3, 2, 1), (2, 3, 4)),
                                  ((1, 1, 4), (1, 2), (1, 4)),
                                  ((1, 1, 1), (), ()),
                                  ((1, ), ('x', 'x'), (1, 1))]:
            ib = [(entry == 1) for entry in xsh]
            x = TensorType('float64', ib)('x')
            e = self.op(ib, shuffle)(x)
            f = copy(linker).accept(FunctionGraph([x], [e])).make_function()
            assert f(numpy.ones(xsh)).shape == zsh
            #test that DimShuffle.infer_shape work correctly
            x = TensorType('float64', ib)('x')
            e = self.op(ib, shuffle)(x)
            f = copy(linker).accept(FunctionGraph([x],
                                                  [e.shape])).make_function()
            assert all(f(numpy.ones(xsh))) == all(zsh)

        # Test when we drop an axis that is not broadcastable
        ib = [False, True, False]
        x = TensorType('float64', ib)('x')
        self.assertRaises(ValueError, self.op, ib, shuffle)

        # Test when we drop an axis that doesn't have shape 1
        ib = [True, True, False]
        x = TensorType('float64', ib)('x')
        e = self.op(ib, (1, 2))(x)
        f = copy(linker).accept(FunctionGraph([x], [e.shape])).make_function()
        self.assertRaises(TypeError, f, numpy.ones((2, 1, 4)))

        # Test that we can't take a dimension multiple times
        xsh, shuffle, zsh = ((1, 1, 4), (0, 1, 2, 0), (1, 4))
        ib = [False, True, False]
        x = TensorType('float64', ib)('x')
        self.assertRaises(ValueError, DimShuffle, ib, shuffle)
Code Example #22
def test_mpi_schedule():
    x = theano.tensor.matrix('x')
    y = send(x, 1, 11)
    z = x + x
    waitnode = y.owner
    sendnode = y.owner.inputs[0].owner
    addnode = z.owner

    f = theano.function([x], [y, z], mode=mpi_mode)
    nodes = f.maker.linker.make_all()[-1]
    optypes = [MPISend, theano.tensor.Elemwise, MPISendWait]
    assert all(
        isinstance(node.op, optype) for node, optype in zip(nodes, optypes))
Code Example #23
 def is_updates(elem):
     if isinstance(elem, dict):
         # Make sure the updates will be applied in a deterministic order
         if (not isinstance(elem, gof.python25.OrderedDict)
                 and len(elem) > 1):
             warnings.warn("Expected OrderedDict or OrderedUpdates, got "\
                     + str(type(elem)) + ". This can make your script non-"
                     "deterministic.")
         return True
     # Dictionaries can be given as lists of tuples
     if (isinstance(elem, (list, tuple)) and all(
         [isinstance(x, (list, tuple)) and len(x) == 2 for x in elem])):
         return True
     return False
Code Example #24
File: scan_utils.py Project: shawakaze/Theano
 def is_updates(elem):
     if isinstance(elem, dict):
         # Make sure the updates will be applied in a deterministic order
         if not isinstance(elem, gof.python25.OrderedDict):
             warnings.warn("Expected OrderedDict or OrderedUpdates, got "\
                     +str(type(elem))+". This can make your script non-"
                     "deterministic.")
         return True
     # Dictionaries can be given as lists of tuples
     if (isinstance(elem, (list, tuple)) and
         all([isinstance(x, (list, tuple)) and len(x) == 2
              for x in elem])):
         return True
     return False
Code Example #25
File: opt.py Project: olivierverdier/Theano
    def apply_node_merge(self, env):
        # we clear the dicts because the Constants signatures are not necessarily hashable
        # and it's more efficient to give them an integer like the other Variables

        nodes_seen = {}

        for node_idx, node in enumerate(_list_of_nodes(env)):
            #
            # these asserts ensure that the env has set the clients field
            # properly; the clients should at least contain `node` itself!
            #
            if node.inputs:
                assert len(node.inputs[0].clients) > 0
                assert (node,0) in node.inputs[0].clients
                merge_candidates = [(nodes_seen[c],c) for (c,i) in node.inputs[0].clients if c in nodes_seen]
            else:
                merge_candidates = []
            merge_candidates.sort()
            nodes_seen[node] = node_idx
            #print 'NODE', node, merge_candidates, node.inputs[0].clients
            for candidate_idx, candidate in merge_candidates:
                if len(node.inputs) != len(candidate.inputs):
                    continue
                inputs_match = all(node_in is cand_in for node_in, cand_in in zip(node.inputs, candidate.inputs))
                if inputs_match and node.op == candidate.op:
                    assert node is not candidate
                    #
                    #transfer clients from node to candidate
                    #
                    success = True
                    assert len(node.outputs) == len(candidate.outputs)
                    pairs = zip(node.outputs, candidate.outputs)

                    #transfer names
                    for node_output, cand_output in pairs:
                        #clobber old name with new one
                        #it's arbitrary... one of the names has to go
                        if node_output.name:
                            cand_output.name = node_output.name
                    try:
                        env.replace_all_validate(pairs, reason="Merge")
                    except InconsistencyError, e:
                        success = False

                    if success:
                        #break out of the candidate loop
                        break
                    else:
                        #try the next candidate
                        pass
Code Example #26
File: test_basic.py Project: thiboeri/Theano
    def test_infer_shape(self):
        def mat(format, name, dtype):
            if format == 'dense':
                return theano.tensor.matrix(name, dtype=dtype)
            else:
                return theano.sparse.matrix(format, name, dtype=dtype)

        params = [('float32', 'float64', 'int16', 'complex64', 'csc', 'dense'),
                  ('float32', 'float64', 'int16', 'complex64', 'csr', 'dense')]
        for dtype1, dtype2, dtype3, dtype4, format1, format2 in params:
            if format1 == 'dense' and format2 == 'dense':
                # Usmm won't be used!
                continue
            x = mat(format1, 'x', dtype1)
            y = mat(format2, 'y', dtype2)
            a = theano.tensor.scalar('a', dtype=dtype3)
            z = theano.shared(numpy.asarray(self.z, dtype=dtype4).copy())

            f_b = lambda z, a, x, y: z - a * (x * y)
            x_data = numpy.asarray(self.x, dtype=dtype1)
            if format1 != 'dense':
                x_data = as_sparse_format(x_data, format1)
            y_data = numpy.asarray(self.y, dtype=dtype2)
            if format2 != 'dense':
                y_data = as_sparse_format(y_data, format2)
            a_data = numpy.asarray(1.5, dtype=dtype3)
            z_data = numpy.asarray(self.z, dtype=dtype4)

            f_b_out = f_b(z_data, a_data, x_data, y_data)

            # Can it work inplace?
            inplace = dtype4 == theano.scalar.upcast(dtype1, dtype2, dtype3)

            # To make it easier to check the toposort
            mode = theano.compile.mode.get_default_mode().excluding('fusion')

            # test infer_shape of Dot got applied
            f_shape = theano.function([a, x, y],
                                      (z - a * theano.sparse.dot(x, y)).shape,
                                      mode=mode)
            assert all(f_shape(a_data, x_data, y_data) == f_b_out.shape)
            topo = f_shape.maker.env.toposort()
            if theano.config.mode != 'FAST_COMPILE':
                nb = 0
            else:
                nb = 1
            assert sum([
                isinstance(node.op, (Dot, Usmm, UsmmCscDense)) for node in topo
            ]) == nb
Code Example #27
File: test_basic.py Project: HaniAlmousli/Theano
    def test_infer_shape(self):
        def mat(format, name, dtype):
            if format == 'dense':
                return theano.tensor.matrix(name, dtype=dtype)
            else:
                return theano.sparse.matrix(format, name, dtype=dtype)

        params = [('float32', 'float64', 'int16', 'complex64', 'csc', 'dense'),
                  ('float32', 'float64', 'int16', 'complex64', 'csr', 'dense')]
        for dtype1, dtype2, dtype3, dtype4, format1, format2 in params:
            if format1 == 'dense' and format2 == 'dense':
                # Usmm won't be used!
                continue
            x = mat(format1, 'x', dtype1)
            y = mat(format2, 'y', dtype2)
            a = theano.tensor.scalar('a', dtype=dtype3)
            z = theano.shared(numpy.asarray(self.z, dtype=dtype4).copy())

            f_b = lambda z, a, x, y: z - a * (x * y)
            x_data = numpy.asarray(self.x, dtype=dtype1)
            if format1 != 'dense':
                x_data = as_sparse_format(x_data, format1)
            y_data = numpy.asarray(self.y, dtype=dtype2)
            if format2 != 'dense':
                y_data = as_sparse_format(y_data, format2)
            a_data = numpy.asarray(1.5, dtype=dtype3)
            z_data = numpy.asarray(self.z, dtype=dtype4)

            f_b_out = f_b(z_data, a_data, x_data, y_data)

            # Can it work inplace?
            inplace = dtype4 == theano.scalar.upcast(dtype1, dtype2, dtype3)

            # To make it easier to check the toposort
            mode = theano.compile.mode.get_default_mode().excluding('fusion')

            # test infer_shape of Dot got applied
            f_shape = theano.function([a, x, y],
                                      (z - a * theano.sparse.dot(x, y)).shape,
                                      mode=mode)
            assert all(f_shape(a_data, x_data, y_data) == f_b_out.shape)
            topo = f_shape.maker.env.toposort()
            if theano.config.mode != 'FAST_COMPILE':
                nb = 0
            else:
                nb = 1
            assert sum([isinstance(node.op, (Dot, Usmm, UsmmCscDense))
                        for node in topo]) == nb
Code Example #28
File: scan_utils.py Project: aelaguiz/Theano
    def filter(x):
        """
        Ensure `x` is made only of allowed data types.

        Return True iff `x` is made only of lists, tuples, dictionaries, Theano
        variables or `theano.scan_module.until` objects.
        """
        # Is `x` a container we can iterate on?
        iter_on = None
        if isinstance(x, list) or isinstance(x, tuple):
            iter_on = x
        elif isinstance(x, dict):
            iter_on = x.iteritems()
        if iter_on is not None:
            return all(filter(y) for y in iter_on)
        else:
            return isinstance(x, theano.Variable) or isinstance(x, theano.scan_module.until)
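
filter recurses through lists, tuples and dicts and only accepts Theano variables (and theano.scan_module.until) as leaves; it is an inner helper, so it is not importable on its own. Hedged examples of values it would accept or reject:

import theano
import theano.tensor as T

x = T.vector('x')
s = theano.shared(0.0)

accepted = [x, (x * 2,), {s: s + 1}]   # nested containers of Theano variables
rejected = [x, 42]                     # a plain int is not an allowed leaf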
Code Example #29
File: opt.py Project: csxlyan/Theano
 def local_opt(node):
     if type(node.op) is OP:
         # This does not support nodes that have more than one output.
         assert len(node.outputs) == 1
         # either one of our inputs is on the gpu or
         # all of our clients are on the gpu
         if (any([i.owner and i.owner.op == host_from_gpu
                  for i in node.inputs]) or
             all([c != 'output' and c.op == gpu_from_host
                  for c, idx in node.outputs[0].clients])):
             new_op = maker(node)
             # This is needed as sometimes new_op inherit from OP.
             if new_op and new_op != node.op:
                 if isinstance(new_op, theano.Op):
                     return [host_from_gpu(new_op(*node.inputs))]
                 else:  # suppose it is a variable on the GPU
                     return [host_from_gpu(new_op)]
     return False
Code Example #30
    def filter(x):
        """
        Ensure `x` is made only of allowed data types.

        Return True iff `x` is made only of lists, tuples, dictionaries, Theano
        variables or `theano.scan_module.until` objects.
        """
        # Is `x` a container we can iterate on?
        iter_on = None
        if isinstance(x, list) or isinstance(x, tuple):
            iter_on = x
        elif isinstance(x, dict):
            iter_on = x.iteritems()
        if iter_on is not None:
            return all(filter(y) for y in iter_on)
        else:
            return (isinstance(x, theano.Variable)
                    or isinstance(x, theano.scan_module.until))
Code Example #31
class T_picklefunction(unittest.TestCase):

    def test_deepcopy(self):
        a = T.scalar() # the a is for 'anonymous' (un-named).
        x,s = T.scalars('xs')

        f = function([x, In(a, value=1.0,name='a'), In(s, value=0.0, update=s+a*x, mutable=True)], s+a*x)

        try:
            g = copy.deepcopy(f)
        except NotImplementedError, e:
            if e[0].startswith('DebugMode is not picklable'):
                return
            else:
                raise
        #if they both return, assume  that they return equivalent things.
        #print [(k,id(k)) for k in f.finder.keys()]
        #print [(k,id(k)) for k in g.finder.keys()]

        self.assertFalse(g.container[0].storage is f.container[0].storage)
        self.assertFalse(g.container[1].storage is f.container[1].storage)
        self.assertFalse(g.container[2].storage is f.container[2].storage)
        self.assertFalse(x in g.container)
        self.assertFalse(x in g.value)
        self.assertTrue(len(f.defaults) == len(g.defaults))
        #print 'f.defaults = %s' % (f.defaults, )
        #print 'g.defaults = %s' % (g.defaults, )
        self.assertTrue(all([f_req == g_req and f_feed == g_feed and
            f_val == g_val
            for ((f_req, f_feed, f_val), (g_req, g_feed, g_val)) in zip(
                f.defaults, g.defaults)]))

        self.assertFalse(g.value[1] is f.value[1]) # should not have been copied
        self.assertFalse(g.value[2] is f.value[2]) # should have been copied because it is mutable.
        self.assertFalse((g.value[2] != f.value[2]).any()) # its contents should be identical

        self.assertTrue(f(2, 1) == g(2)) #they should be in sync, default value should be copied.
        self.assertTrue(f(2, 1) == g(2)) #they should be in sync, default value should be copied.
        f(1,2) # put them out of sync
        self.assertFalse(f(1, 2) == g(1, 2)) #they should not be equal anymore.
        g(1, 2) # put them back in sync
        self.assertTrue(f(3) == g(3)) # They should be in sync again.
Code Example #32
File: opt.py Project: Jerryzcn/Theano
        def local_opt(node):
            if type(node.op) in OP:

                # Either one of our inputs is on the gpu or
                # all of our clients are on the gpu
                if (any([i.owner and i.owner.op == host_from_gpu
                         for i in node.inputs]) or
                    all([c != 'output' and c.op == gpu_from_host
                         for c, idx in node.outputs[0].clients])):
                    new_op = maker(node)
                    # This is needed as sometimes new_op inherit from OP.
                    if new_op and new_op != node.op:
                        if isinstance(new_op, theano.Op):
                            return [safe_to_cpu(o) for o in
                                    new_op(*node.inputs, return_list=True)]
                        elif isinstance(new_op, (tuple, list)):
                            return [safe_to_cpu(o) for o in new_op]
                        else:  # suppose it is a variable on the GPU
                            return [host_from_gpu(new_op)]
            return False
Code Example #33
File: opt.py Project: jacoblsmith/Theano
        def local_opt(node):
            if type(node.op) in OP:

                # Either one of our inputs is on the gpu or
                # all of our clients are on the gpu
                if (any([i.owner and i.owner.op == host_from_gpu
                         for i in node.inputs]) or
                    all([c != 'output' and c.op == gpu_from_host
                         for c, idx in node.outputs[0].clients])):
                    new_op = maker(node)
                    # This is needed as sometimes new_op inherit from OP.
                    if new_op and new_op != node.op:
                        if isinstance(new_op, theano.Op):
                            return [host_from_gpu(o) for o in
                                    new_op(*node.inputs, return_list=True)]
                        elif isinstance(new_op, (tuple, list)):
                            return [host_from_gpu(o) for o in new_op]
                        else:  # suppose it is a variable on the GPU
                            return [host_from_gpu(new_op)]
            return False
Code Example #34
File: opt.py Project: csxlyan/Theano
 def local_opt(node):
     if type(node.op) is OP:
         # This does not support nodes that have more than one output.
         assert len(node.outputs) == 1
         # either one of our inputs is on the gpu or
         # all of our clients are on the gpu
         if (any([
                 i.owner and i.owner.op == host_from_gpu
                 for i in node.inputs
         ]) or all([
                 c != 'output' and c.op == gpu_from_host
                 for c, idx in node.outputs[0].clients
         ])):
             new_op = maker(node)
             # This is needed as sometimes new_op inherit from OP.
             if new_op and new_op != node.op:
                 if isinstance(new_op, theano.Op):
                     return [host_from_gpu(new_op(*node.inputs))]
                 else:  # suppose it is a variable on the GPU
                     return [host_from_gpu(new_op)]
     return False
Code Example #35
 def uniform(self, size, low=0.0, high=1.0, ndim=None, dtype=config.floatX):
     """
     Return symbolic tensor of uniform numbers.
     """
     if isinstance(size, tuple):
         msg = "size must be a tuple of int or a Theano variable"
         assert all([
             isinstance(i, int) or isinstance(i, Variable) for i in size
         ]), msg
     else:
         msg = "size must be a tuple of int or a Theano variable"
         assert isinstance(size, Variable) and size.ndim == 1, msg
     generator = theano.shared(False)  # makes a generic
     s_size = theano.tensor.as_tensor_variable(size)
     u = CURAND_Uniform.new_auto_update(generator, ndim, dtype, s_size,
                                        self.next_seed())
     self.state_updates.append(u.update)
     rval = u * (high - low) + low
     if u.type.broadcastable != rval.type.broadcastable:
         raise NotImplementedError(
             'Increase the size to match the broadcasting pattern of '
             'low and `high` arguments')
     return rval
Code Example #36
File: rng_curand.py Project: Dimitris0mg/Theano
 def uniform(self, size, low=0.0, high=1.0, ndim=None,
         dtype=config.floatX):
     """
     Return symbolic tensor of uniform numbers.
     """
     if isinstance(size, tuple):
         msg = "size must be a tuple of int or a Theano variable"
         assert all([isinstance(i, int) or isinstance(i, Variable)
             for i in size]), msg
     else:
         msg = "size must be a tuple of int or a Theano variable"
         assert isinstance(size, Variable) and size.ndim == 1, msg
     generator = theano.shared(False)  # makes a generic
     s_size = theano.tensor.as_tensor_variable(size)
     u = CURAND_Uniform.new_auto_update(generator, ndim, dtype, s_size,
             self.next_seed())
     self.state_updates.append(u.update)
     rval = u * (high - low) + low
     if u.type.broadcastable != rval.type.broadcastable:
         raise NotImplementedError(
             'Increase the size to match the broadcasting pattern of '
             'low and `high` arguments'
         )
     return  rval
Code Example #37
File: vm.py Project: xinfanmeng/Theano
    def __call__(self):
        storage_map = self.storage_map
        compute_map = self.compute_map
        thunks = self.thunks
        dependencies = self.dependencies
        for k in self.storage_map:
            compute_map[k][0] = (k.owner is None)

        # apply_stack contains nodes
        apply_stack = list(self.base_apply_stack)
        last_apply_stack_len = -1
        ls = []
        while apply_stack:
            # Make sure something happened last time round.  This is
            # just a safety check to make sure the op is written
            # correctly apply_stack should either decrease in length
            # by one (a thunk successfully applied), or increase in
            # length (added dependencies over and above the original).
            # NB: this doesn't catch cycles (would be too expensive/slow),
            #     just stalls.
            apply_stack_len = len(apply_stack)
            assert apply_stack_len != last_apply_stack_len
            last_apply_stack_len = apply_stack_len

            current_apply = apply_stack.pop()
            current_inputs = current_apply.inputs
            current_outputs = current_apply.outputs
            current_deps = current_inputs + current_apply.destroy_dependencies

            computed_ins = all(compute_map[v][0] for v in current_deps)
            computed_outs = all(compute_map[v][0] for v in current_outputs)

            if not thunks[self.node_idx[current_apply]].lazy:
                #
                # stack loop: Normal Non-Lazy Case
                # ================================
                #
                # Check if all inputs are in place
                # If so compute thunk and remove it from the apply_stack
                # If not leave it in, and add to the apply_stack those
                # that will produce you those inputs

                if computed_ins and not computed_outs:
                    # -- Non-lazy case: have inputs, time to compute outputs
                    try:
                        _, dt = self.run_thunk_of_node(current_apply)
                        del _
                        if config.profile:
                            self.apply_time[current_apply] += dt
                            ## Computing the memory footprint of the op
                            # ?? What about inplace .. if the op is inplace
                            # you don't actually ask for more memory!
                            size = []
                            for (idx, o) in enumerate(thunks[
                                    self.node_idx[current_apply]].outputs):
                                if not hasattr(o[0], 'size'):
                                    size.append(-1)
                                    continue
                                s = o[0].size
                                dtype = str(o[0].dtype)
                                dtype2 = dtype[-3:]
                                # KeyError here: couldn't determine
                                # the dtype memory size
                                s *= self.memory_size_map[dtype2]
                                size.append(s)
                            self.outputs_size[current_apply] = size
                    except Exception:
                        raise_with_op(current_apply)
                    for o in current_apply.outputs:
                        compute_map[o][0] = 1
                    if self.allow_gc:
                        for i in current_apply.inputs:
                            # Garbage Collection -> check if anybody else uses
                            # this input
                            if (dependencies[i] and i.owner
                                    and i not in self.outputs):
                                if all(compute_map[v][0]
                                       for v in dependencies[i]):
                                    storage_map[i][0] = None
                                    #DO NOT set compute_map to 0

                                    #If values become False and the
                                    #current_apply is still in the
                                    #stack, this will cause it to be
                                    #recomputed! This can cause wrong value
                                    #with some combination of inplace op.
                                    compute_map[i][0] = 2
                                    if (config.warn.vm_gc_bug
                                            and current_apply in apply_stack
                                            and getattr(
                                                current_apply.op,
                                                'destroy_map', False)):
                                        warnings.warn(
                                            "There was a bug that existed in the default Theano configuration,"
                                            " only in the development version between July 5th 2012"
                                            " and July 30th 2012. This was not in a released version."
                                            " The bug was affecting this script.",
                                            #The stack level is not good when inside a Scan.
                                            stacklevel=3)
                elif not computed_ins:
                    # -- Non-lazy case, need inputs
                    apply_stack.append(current_apply)
                    apply_stack.extend(inp.owner for inp in current_deps
                                       if inp.owner)

            elif not computed_outs:
                #
                # stack loop: Lazy Evaluation Case
                # ================================
                #
                # Lazy evaluation protocol is to run the thunk with the
                # current storage_map and compute_map accessed via closure,
                # and the thunk will return a list of variables from its input
                # list that it requires.

                try:
                    requires, dt = self.run_thunk_of_node(current_apply)
                    self.apply_time[current_apply] += dt

                except Exception:
                    raise_with_op(current_apply)

                if requires:
                    for r in requires:
                        # We are not done with this op, so we add it
                        # back to the stack and go fetch the inputs we
                        # are missing
                        apply_stack.append(current_apply)
                        if current_apply.inputs[r].owner:
                            apply_stack.append(current_apply.inputs[r].owner)
                else:
                    if config.profile:
                        size = []
                        for (idx, o) in enumerate(
                                thunks[self.node_idx[current_apply]].outputs):
                            if not hasattr(o[0], 'size'):
                                size.append(-1)
                                continue
                            s = o[0].size
                            dtype = str(o[0].dtype)
                            dtype2 = dtype[-2:]
                            # KeyError here: couldn't determine the
                            # dtype memory size
                            s *= self.memory_size_map[dtype2]
                            size.append(s)
                        self.outputs_size[current_apply] = size
                    if self.allow_gc:
                        for i in current_apply.inputs:
                            if (dependencies[i] and i.owner
                                    and i not in self.outputs):
                                empty_storage_map = True
                                for x in dependencies[i]:
                                    if not compute_map[x][0]:
                                        empty_storage_map = False
                                        break
                                if empty_storage_map:
                                    storage_map[i][0] = None
                                    #See the not lazy gc code for explanations
                                    #of compute_map change
                                    compute_map[i][0] = 2

        # Hacky coarse gc final pass
        # This is required until we have a proper gc algorithm for graphs with
        # lazy evaluation. See discussion on theano-dev June 19 2012.
        if self.allow_gc:
            for v in storage_map:
                if v.owner and not v in self.outputs:
                    storage_map[v][0] = None
Code Example #38
    def normal(self, size, avg=0.0, std=1.0, ndim=None,
               dtype=None, nstreams=None):
        """
        :param size: Can be a list of integers or Theano variables (ex: the
        shape of another Theano Variable)

        :param dtype: The output data type. If dtype is not specified, it will
        be inferred from the dtype of low and high, but will be at least as
        precise as floatX.

        :param nstreams: Number of streams.
        """
        # We need an even number of ]0,1[ samples. Then we split them
        # in two halves. First half becomes our U1's for Box-Muller,
        # second half our U2's. See Wikipedia page:
        # http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform
        avg = as_tensor_variable(avg)
        std = as_tensor_variable(std)

        if dtype is None:
            dtype = scal.upcast(config.floatX, avg.dtype, std.dtype)

        avg = cast(avg, dtype)
        std = cast(std, dtype)

        evened = False
        constant = False
        if isinstance(size, tuple) and all([isinstance(i, (numpy.integer, int)) for i in size]):
            constant = True
            # Force dtype because it defaults to float when size is empty
            n_samples = numpy.prod(size, dtype='int64')

            if n_samples % 2 == 1:
                n_samples += 1
                evened = True
        else:
            #if even, don't change, if odd, +1
            n_samples = prod(size) + (prod(size) % 2)
        flattened = self.uniform(size=(n_samples,), dtype=dtype,
                                 nstreams=nstreams)

        if constant:
            U1 = flattened[:n_samples // 2]
            U2 = flattened[n_samples // 2:]
        else:
            U1 = flattened[:prod(flattened.shape) // 2]
            U2 = flattened[prod(flattened.shape) // 2:]

        #normal_samples = zeros_like(flattened)
        sqrt_ln_U1 = sqrt(-2.0 * log(U1))
        # TypeError: 'TensorVariable' object does not support item assignment
        # so this doesn't work...
        #normal_samples[:n_samples/2] = sqrt_ln_U1 * cos(2.0*numpy.pi*U2)
        #normal_samples[n_samples/2:] = sqrt_ln_U1 * sin(2.0*numpy.pi*U2)

        # so trying this instead
        first_half = sqrt_ln_U1 * cos(numpy.array(2.0 * numpy.pi, dtype=dtype) * U2)
        second_half = sqrt_ln_U1 * sin(numpy.array(2.0 * numpy.pi, dtype=dtype) * U2)
        normal_samples = join(0, first_half, second_half)

        final_samples = None
        if evened:
            final_samples = normal_samples[:-1]
        elif constant:
            final_samples = normal_samples
        else:
            final_samples = normal_samples[:prod(size)]

        if not size:
            # Force the dtype to be int64, otherwise reshape complains
            size = tensor.constant(size, dtype='int64')
        final_samples = final_samples.reshape(size)

        final_samples = avg + std * final_samples

        assert final_samples.dtype == dtype
        return final_samples
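
The comment above points to the Box-Muller transform; here is a hedged plain-NumPy sketch of that transform, independent of the Theano code:

import numpy

u1 = numpy.random.uniform(size=500)          # first half of the (0, 1) samples
u2 = numpy.random.uniform(size=500)          # second half
r = numpy.sqrt(-2.0 * numpy.log(u1))
z = numpy.concatenate([r * numpy.cos(2.0 * numpy.pi * u2),
                       r * numpy.sin(2.0 * numpy.pi * u2)])
# z now holds 1000 approximately standard-normal samples.
print(z.mean(), z.std())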
Code Example #39
    def uniform(self, size, low=0.0, high=1.0, ndim=None, dtype=None,
                nstreams=None):
        """
        Sample a tensor of the given size whose elements are drawn from a
        uniform distribution between low and high.

        If the size argument is ambiguous on the number of dimensions,
        ndim may be a plain integer to supplement the missing
        information.

        :param low: Lower bound of the interval on which values are sampled.
        If the ``dtype`` arg is provided, ``low`` will be cast into dtype.
        This bound is excluded.

        :param high: Higher bound of the interval on which values are sampled.
        If the ``dtype`` arg is provided, ``high`` will be cast into dtype.
        This bound is excluded.

        :param size: Can be a list of integers or a Theano variable
                (ex: the shape of another Theano Variable)

        :param dtype: The output data type. If dtype is not specified, it will
        be inferred from the dtype of low and high, but will be at least as
        precise as floatX.
        """
        low = as_tensor_variable(low)
        high = as_tensor_variable(high)
        if dtype is None:
            dtype = scal.upcast(config.floatX, low.dtype, high.dtype)

        low = cast(low, dtype=dtype)
        high = cast(high, dtype=dtype)

        if isinstance(size, tuple):
            msg = "size must be a tuple of int or a Theano variable"
            assert all([isinstance(i, (numpy.integer, int, Variable))
                        for i in size]), msg
            if any([isinstance(i, (numpy.integer, int)) and i <= 0 for i in size]):
                raise ValueError(
                    "The specified size contains a dimension with value <= 0",
                    size)

        else:
            if not (isinstance(size, Variable) and size.ndim == 1):
                raise TypeError("size must be a tuple of int or a Theano "
                                "Variable with 1 dimension, got " + str(size) +
                                " of type " + str(type(size)))

        if nstreams is None:
            nstreams = self.n_streams(size)

        if self.use_cuda and dtype == 'float32':
            rstates = self.get_substream_rstates(nstreams)
            rstates = rstates.flatten()
            # HACK - we use fact that int32 and float32 have same size to
            # sneak ints into the CudaNdarray type.
            # these *SHOULD NEVER BE USED AS FLOATS*
            tmp_float_buf = numpy.frombuffer(rstates.data, dtype='float32')
            assert tmp_float_buf.shape == rstates.shape
            assert (tmp_float_buf.view('int32') == rstates).all()
            # transfer to device
            node_rstate = float32_shared_constructor(tmp_float_buf)
            assert isinstance(node_rstate.type, CudaNdarrayType)

            # we can't use the normal mrg_uniform constructor + later
            # optimization
            # because of the tmp_float_buf hack above.  There is
            # currently no Theano node that will do a frombuffer
            # reinterpretation.
            u = self.pretty_return(node_rstate,
                                   *GPU_mrg_uniform.new(node_rstate,
                                                        ndim, dtype, size))
        else:
            node_rstate = shared(self.get_substream_rstates(nstreams))
            u = self.pretty_return(node_rstate,
                                   *mrg_uniform.new(node_rstate,
                                                    ndim, dtype, size))
        r = u * (high - low) + low

        if u.type.broadcastable != r.type.broadcastable:
            raise NotImplementedError(
                'Increase the size to match the broadcasting pattern of '
                '`low` and `high` arguments')

        assert r.dtype == dtype
        return r
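
A hedged usage sketch of this sampler through the public MRG_RandomStreams class (class and module names assumed from the rng_mrg.py file shown elsewhere on this page):

import theano
from theano.sandbox.rng_mrg import MRG_RandomStreams

srng = MRG_RandomStreams(seed=1234)
u = srng.uniform(size=(2, 3), low=-1.0, high=1.0)
f = theano.function([], u)
print(f())          # a 2x3 array of samples drawn from ]-1, 1[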
Code Example #40
 def test_infer_shape(self):
     f = theano.function([], softmax(numpy.random.rand(3, 4)).shape)
     assert all(f() == [3, 4])
Code Example #41
File: opt.py Project: amishtal/Theano
 def __init__(self, *optimizers):
     self.opts = optimizers
     self.reentrant = any(
         getattr(opt, 'reentrant', True) for opt in optimizers)
     self.retains_inputs = all(
         getattr(opt, 'retains_inputs', False) for opt in optimizers)
Code Example #42
File: module.py Project: xinfanmeng/Theano
# Component -> itself
register_wrapper(lambda x: isinstance(x, Component), lambda x: x, no_warn=True)

# Variable -> Member
register_wrapper(lambda x: isinstance(x, gof.Variable) and not x.owner,
                 lambda x: Member(x),
                 no_warn=True)

# Variable -> External
register_wrapper(lambda x: isinstance(x, gof.Variable) and x.owner,
                 lambda x: External(x),
                 no_warn=True)

# [[Variable1], {Variable2}, Variable3...] -> ComponentList(Member(Variable1), Member(Variable2), ...)
register_wrapper(lambda x: isinstance(x, (list, tuple))
                     and all(wrapper(r) is not None for r in x),
                 lambda x: ComponentList(*map(wrap, x)), no_warn=True)

#{ "name1":{Component,Variable,list,tuple,dict},...} -> ComponentDict({Component,Variable,list,tuple,dict},...)
register_wrapper(lambda x: isinstance(x, dict)
                     and all(wrapper(r) is not None for r in x.itervalues()),
                 lambda x: ComponentDict(dict_wrap(x)), no_warn=True)


class Curry:
    def __init__(self, obj, name, arg):
        deprecation_warning()
        self.obj = obj
        self.name = name
        self.meth = getattr(self.obj, self.name)
        self.arg = arg
コード例 #43
        def test_specify_shape_inplace(self):
            # Test that specify_shape doesn't break inserting inplace ops

            dtype = self.dtype
            if dtype is None:
                dtype = theano.config.floatX

            rng = numpy.random.RandomState(utt.fetch_seed())
            a = numpy.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)
            a = self.cast_value(a)
            a_shared = self.shared_constructor(a)
            b = numpy.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)
            b = self.cast_value(b)
            b_shared = self.shared_constructor(b)
            s = numpy.zeros((40, 40), dtype=dtype)
            s = self.cast_value(s)
            s_shared = self.shared_constructor(s)
            f = theano.function(
                [],
                updates={s_shared: theano.dot(a_shared, b_shared) + s_shared})
            topo = f.maker.env.toposort()
            f()
            #[Gemm{inplace}(<TensorType(float64, matrix)>, 0.01, <TensorType(float64, matrix)>, <TensorType(float64, matrix)>, 2e-06)]
            if theano.config.mode != 'FAST_COMPILE':
                assert sum([
                    node.op.__class__.__name__
                    in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo
                ]) == 1
                assert all(node.op == tensor.blas.gemm_inplace for node in topo
                           if isinstance(node.op, tensor.blas.Gemm))
                assert all(node.op.inplace for node in topo
                           if node.op.__class__.__name__ == "GpuGemm")
            #There is no inplace gemm for sparse
            #assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "StructuredDot")
            s_shared_specify = tensor.specify_shape(
                s_shared,
                s_shared.get_value(borrow=True).shape)

            #now test with the specify shape op in the output
            f = theano.function([],
                                s_shared.shape,
                                updates={
                                    s_shared:
                                    theano.dot(a_shared, b_shared) +
                                    s_shared_specify
                                })
            topo = f.maker.env.toposort()
            shp = f()
            assert numpy.all(shp == (40, 40))
            if theano.config.mode != 'FAST_COMPILE':
                assert sum([
                    node.op.__class__.__name__
                    in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo
                ]) == 1
                assert all(node.op == tensor.blas.gemm_inplace for node in topo
                           if isinstance(node.op, tensor.blas.Gemm))
                assert all(node.op.inplace for node in topo
                           if node.op.__class__.__name__ == "GpuGemm")
            #now test with the specify shape op in the inputs and outputs
            a_shared = tensor.specify_shape(
                a_shared,
                a_shared.get_value(borrow=True).shape)
            b_shared = tensor.specify_shape(
                b_shared,
                b_shared.get_value(borrow=True).shape)

            f = theano.function([],
                                s_shared.shape,
                                updates={
                                    s_shared:
                                    theano.dot(a_shared, b_shared) +
                                    s_shared_specify
                                })
            topo = f.maker.env.toposort()
            shp = f()
            assert numpy.all(shp == (40, 40))
            if theano.config.mode != 'FAST_COMPILE':
                assert sum([
                    node.op.__class__.__name__
                    in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo
                ]) == 1
                assert all(node.op == tensor.blas.gemm_inplace for node in topo
                           if isinstance(node.op, tensor.blas.Gemm))
                assert all(node.op.inplace for node in topo
                           if node.op.__class__.__name__ == "GpuGemm")
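The test above relies on tensor.specify_shape to pin a shared variable's shape without blocking the inplace Gemm. A minimal hedged sketch of the op on its own, assuming only the standard Theano API:

import numpy
import theano
import theano.tensor as tensor

x = tensor.matrix('x')
x2 = tensor.specify_shape(x, (2, 2))          # shape assertion checked at runtime
f = theano.function([x], x2.sum())
print(f(numpy.ones((2, 2), dtype=theano.config.floatX)))   # 4.0
# f(numpy.ones((3, 3), dtype=theano.config.floatX))        # would raise: shape mismatch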
コード例 #44
                # FOR EACH OUTPUT PIXEL...
                for oy in N.arange(lbound[0], ubound[0],
                                   dy):  # loop over output image height
                    for ox in N.arange(lbound[1], ubound[1],
                                       dx):  # loop over output image width

                        l = 0  # kern[l] is filter value to apply at (oj,oi) for (iy,ix)

                        # ... ITERATE OVER INPUT UNITS IN RECEPTIVE FIELD
                        for ky in oy + N.arange(kshp[0]):
                            for kx in ox + N.arange(kshp[1]):

                                # check whether we are still within the image boundaries.
                                # Equivalent to zero-padding of the input image
                                if all((ky, kx) >= topleft) and all(
                                    (ky, kx) < botright):

                                    # convert to "valid" input space coords
                                    # used to determine column index to write to in sparse mat
                                    iy, ix = N.array((ky, kx)) - topleft
                                    # determine raster-index of input pixel...
                                    col = iy * inshp[2] + ix + \
                                          fmapi * N.prod(inshp[1:])  # taking into account multiple input features

                                    # convert oy,ox values to output space coordinates
                                    if mode == 'full':
                                        (y, x) = (oy, ox)
                                    else:
                                        (y, x) = (oy, ox) - topleft
                                    (y, x) = N.array([y, x]) / (
コード例 #45
ファイル: vm.py プロジェクト: wycg1984/Theano
    def __call__(self):
        storage_map = self.storage_map
        compute_map = self.compute_map
        thunks = self.thunks
        dependencies = self.dependencies
        self.node_executed_order = []
        self.node_cleared_order = []

        for k in self.storage_map:
            compute_map[k][0] = (k.owner is None)

        # apply_stack contains nodes
        apply_stack = list(self.base_apply_stack)
        last_apply_stack_len = -1

        # This records all function inputs/shared variables and constants
        for var, data in self.storage_map.iteritems():
            if data[0] is None:
                continue
            if hasattr(var.type, 'get_shape_info'):
                sh = var.type.get_shape_info(data[0])
            else:
                sh = 'input no shape'
            self.variable_shape[var] = sh
            st = getattr(data[0], 'strides', 'input no strides')
            if getattr(data[0], 'flags', False) and data[0].flags.c_contiguous:
                st = 'c'
            elif (hasattr(data[0], 'is_c_contiguous')
                  and data[0].is_c_contiguous()):
                st = "c"
            self.variable_strides[var] = st

        while apply_stack:
            # Make sure something happened last time round.  This is
            # just a safety check to make sure the op is written
            # correctly apply_stack should either decrease in length
            # by one (a thunk successfully applied), or increase in
            # length (added dependencies over and above the original).
            # NB: this doesn't catch cycles (would be too expensive/slow),
            #     just stalls.
            apply_stack_len = len(apply_stack)
            assert apply_stack_len != last_apply_stack_len
            last_apply_stack_len = apply_stack_len

            current_apply = apply_stack.pop()
            current_inputs = current_apply.inputs
            current_outputs = current_apply.outputs
            current_deps = current_inputs + current_apply.destroy_dependencies

            computed_ins = all(compute_map[v][0] for v in current_deps)
            computed_outs = all(compute_map[v][0] for v in current_outputs)

            if not thunks[self.node_idx[current_apply]].lazy:
                #
                # stack loop: Normal Non-Lazy Case
                # ================================
                #
                # Check if all inputs are in place
                # If so compute thunk and remove it from the apply_stack
                # If not leave it in, and add to the apply_stack those
                # that will produce you those inputs

                if computed_ins and not computed_outs:
                    # -- Non-lazy case: have inputs, time to compute outputs
                    try:
                        _, dt = self.run_thunk_of_node(current_apply)
                        del _
                        if config.profile:
                            current_idx = self.node_idx[current_apply]
                            self.call_counts[current_idx] += 1
                            self.call_times[current_idx] += dt
                            ## Computing the memory footprint of the op
                            # ?? What about inplace .. if the op is inplace
                            # you don't actually ask for more memory!
                            for (idx, o) in enumerate(thunks[
                                    self.node_idx[current_apply]].outputs):
                                var = self.nodes[current_idx].outputs[idx]
                                if hasattr(var.type, 'get_shape_info'):
                                    sh = var.type.get_shape_info(o[0])
                                else:
                                    sh = 'input no shape'
                                self.variable_shape[var] = sh
                                st = getattr(o[0], 'strides',
                                             'input no strides')
                                if (getattr(o[0], 'flags', False)
                                        and o[0].flags.c_contiguous):
                                    st = 'c'
                                elif (hasattr(o[0], 'is_c_contiguous')
                                      and o[0].is_c_contiguous()):
                                    st = "c"
                                self.variable_strides[var] = st
                    except Exception:
                        raise_with_op(
                            current_apply,
                            self.thunks[self.node_idx[current_apply]])
                    for o in current_apply.outputs:
                        compute_map[o][0] = 1

                    input_index = []
                    # A list storing the indices of input variables

                    if self.allow_gc:
                        for i in current_apply.inputs:
                            # Garbage Collection -> check if anybody else uses
                            # this input
                            if (dependencies[i] and i.owner
                                    and i not in self.outputs):
                                if all(compute_map[v][0]
                                       for v in dependencies[i]):
                                    storage_map[i][0] = None
                                    input_index.append(
                                        current_apply.inputs.index(i))

                                    #DO NOT set compute_map to 0

                                    #If values become False and the
                                    #current_apply is still in the
                                    #stack, this will cause it to be
                                    #recomputed! This can cause wrong values
                                    #with some combinations of inplace ops.
                                    compute_map[i][0] = 2
                                    if (config.warn.vm_gc_bug
                                            and current_apply in apply_stack
                                            and getattr(
                                                current_apply.op,
                                                'destroy_map', False)):
                                        warnings.warn(
                                            "There was a bug that existed in the default Theano configuration,"
                                            " only in the development version between July 5th 2012"
                                            " and July 30th 2012. This was not in a released version."
                                            " The bug was affecting this script.",
                                            #The stack level is not good when inside a Scan.
                                            stacklevel=3)
                    self.node_cleared_order.append(input_index)

                elif not computed_ins:
                    # -- Non-lazy case, need inputs
                    apply_stack.append(current_apply)
                    apply_stack.extend(inp.owner for inp in current_deps
                                       if inp.owner)

            elif not computed_outs:
                #
                # stack loop: Lazy Evaluation Case
                # ================================
                #
                # Lazy evaluation protocol is to run the thunk with the
                # current storage_map and compute_map accessed via closure,
                # and the thunk will return a list of variables from its input
                # list that it requires.

                try:
                    requires, dt = self.run_thunk_of_node(current_apply)
                    current_idx = self.node_idx[current_apply]
                    self.call_counts[current_idx] += 1
                    self.call_times[current_idx] += dt

                except Exception:
                    raise_with_op(current_apply,
                                  self.thunks[self.node_idx[current_apply]])

                if requires:
                    for r in requires:
                        # We are not done with this op, so we add it
                        # back and queue the owners of the inputs we
                        # are missing
                        apply_stack.append(current_apply)
                        if current_apply.inputs[r].owner:
                            apply_stack.append(current_apply.inputs[r].owner)
                else:
                    if config.profile:
                        for (idx, o) in enumerate(
                                thunks[self.node_idx[current_apply]].outputs):
                            var = self.nodes[
                                self.node_idx[current_apply]].outputs[idx]

                            if hasattr(var.type, 'get_shape_info'):
                                sh = var.type.get_shape_info(o[0])
                            else:
                                sh = 'input no shape'
                            self.variable_shape[var] = sh
                            st = getattr(o[0], 'strides', 'input no strides')
                            if (getattr(o[0], 'flags', False)
                                    and o[0].flags.c_contiguous):
                                st = 'c'
                            elif (hasattr(o[0], 'is_c_contiguous')
                                  and o[0].is_c_contiguous()):
                                st = "c"
                            self.variable_strides[var] = st

                    input_index = []

                    if self.allow_gc:
                        for i in current_apply.inputs:
                            if (dependencies[i] and i.owner
                                    and i not in self.outputs):
                                empty_storage_map = True
                                for x in dependencies[i]:
                                    if not compute_map[x][0]:
                                        empty_storage_map = False
                                        break
                                if empty_storage_map:
                                    storage_map[i][0] = None
                                    input_index.append(
                                        current_apply.inputs.index(i))
                                    #See the non-lazy gc code for an
                                    #explanation of the compute_map change
                                    compute_map[i][0] = 2

                    self.node_cleared_order.append(input_index)

        # Hacky coarse gc final pass
        # This is required until we have a proper gc algorithm for graphs with
        # lazy evaluation. See discussion on theano-dev June 19 2012.
        final_index = []

        if self.allow_gc:
            for v in storage_map:
                if v.owner and v not in self.outputs:
                    if compute_map[v][0] == 2:
                        continue
                    else:
                        storage_map[v][0] = None
                        final_index.append(v)
                        compute_map[v][0] = 2

        self.node_cleared_order.append(final_index)
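The garbage-collection branches above, and the final coarse pass, are all guarded by self.allow_gc, which ultimately comes from Theano's allow_gc config flag. A hedged sketch of turning it off to trade memory for speed:

import theano

# Keep intermediate storage alive between calls instead of freeing it after
# each apply node (what the Stack VM code above does when allow_gc is True).
theano.config.allow_gc = False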
コード例 #46
ファイル: vm.py プロジェクト: wycg1984/Theano
    def make_vm(self, nodes, thunks, input_storage, output_storage,
                storage_map, post_thunk_clear, computed, compute_map,
                updated_vars):

        pre_call_clear = [storage_map[v] for v in self.no_recycling]

        if (self.callback is not None
                or (config.profile and config.profile_memory)):

            if self.use_cloop and self.callback is not None:
                logger.warn('CVM does not support callback, using Stack VM.')
            if self.use_cloop and config.profile_memory:
                warnings.warn(
                    'CVM does not support memory profile, using Stack VM.')
            deps = None
            if self.allow_gc:
                deps = self.compute_gc_dependencies(storage_map)
            vm = Stack(nodes,
                       thunks,
                       pre_call_clear,
                       storage_map,
                       compute_map,
                       self.fgraph,
                       self.allow_gc,
                       dependencies=deps,
                       callback=self.callback)
        elif self.use_cloop:
            # create a map from nodes to ints and vars to ints
            nodes_idx = {}
            vars_idx = {}
            for i, node in enumerate(nodes):
                nodes_idx[node] = i
                for v in node.inputs + node.outputs:
                    vars_idx.setdefault(v, len(vars_idx))
            for v in self.fgraph.inputs + self.fgraph.outputs:
                vars_idx.setdefault(v, len(vars_idx))

            nodes_idx_inv = {}
            vars_idx_inv = {}
            for (node, i) in nodes_idx.items():
                nodes_idx_inv[i] = node
            for (var, i) in vars_idx.items():
                vars_idx_inv[i] = var

            # put storage_map and compute_map into an int-based scheme
            n_applies = len(nodes)
            storage_map_list = [
                storage_map[vars_idx_inv[i]] for i in xrange(len(vars_idx_inv))
            ]
            compute_map_list = [
                compute_map[vars_idx_inv[i]] for i in xrange(len(vars_idx_inv))
            ]
            if nodes:
                assert type(storage_map_list[0]) is list
                assert type(compute_map_list[0]) is list

            if self.allow_gc:
                dependency_map = self.compute_gc_dependencies(storage_map)
                dependency_map_list = [[
                    vars_idx[d] for d in dependency_map[vars_idx_inv[i]]
                ] for i in xrange(len(vars_idx_inv))]
            else:
                dependency_map_list = None

            # build the pointers to node inputs and offsets
            base_input_output_list = []
            node_n_inputs = []
            node_n_outputs = []
            node_input_offset = []
            node_output_offset = []
            for node in nodes:
                inputs_idx = [vars_idx[v] for v in node.inputs]
                outputs_idx = [vars_idx[v] for v in node.outputs]
                node_n_inputs.append(len(inputs_idx))
                node_n_outputs.append(len(outputs_idx))
                node_input_offset.append(len(base_input_output_list))
                base_input_output_list.extend(inputs_idx)
                node_output_offset.append(len(base_input_output_list))
                base_input_output_list.extend(outputs_idx)

            # build the var owner array
            var_owner = [None] * len(vars_idx)
            for (var, i) in vars_idx.items():
                if var.owner:
                    var_owner[i] = nodes_idx[var.owner]

            is_lazy_list = [int(th.lazy) for th in thunks]
            output_vars = [vars_idx[v] for v in self.fgraph.outputs]

            # builds the list of prereqs induced by e.g. destroy_handler
            ords = self.fgraph.orderings()
            node_prereqs = []
            node_output_size = []
            for i, node in enumerate(nodes):
                node_output_size.append(0)
                prereq_var_idxs = []
                for prereq_node in ords.get(node, []):
                    prereq_var_idxs.extend(
                        [vars_idx[v] for v in prereq_node.outputs])
                prereq_var_idxs = list(set(prereq_var_idxs))
                prereq_var_idxs.sort()  # TODO: why sort?
                node_prereqs.append(prereq_var_idxs)

            # Builds the list of input storage to update (according to update
            # rules) when the outputs are computed.
            # They are in the same order as the second part of output_vars
            # (output_vars contains first the returned outputs, then the
            # values of the update expressions).
            update_storage = []
            update_in_from_out = {}
            for (ivar, ovar) in updated_vars.items():
                update_in_from_out[vars_idx[ovar]] = vars_idx[ivar]
            for oidx in output_vars:
                if oidx in update_in_from_out:
                    update_storage.append(update_in_from_out[oidx])

            c0 = sys.getrefcount(node_n_inputs)
            vm = CVM(
                nodes,
                thunks,
                pre_call_clear,
                allow_gc=self.allow_gc,
                call_counts=[0] * len(nodes),
                call_times=[0.0] * len(nodes),
                compute_map_list=compute_map_list,
                storage_map_list=storage_map_list,
                base_input_output_list=base_input_output_list,
                node_n_inputs=node_n_inputs,
                node_n_outputs=node_n_outputs,
                node_input_offset=node_input_offset,
                node_output_offset=node_output_offset,
                var_owner=var_owner,
                is_lazy_list=is_lazy_list,
                output_vars=output_vars,
                node_prereqs=node_prereqs,
                node_output_size=node_output_size,
                update_storage=update_storage,
                dependencies=dependency_map_list,
            )
            assert c0 == sys.getrefcount(node_n_inputs)
        else:
            lazy = self.lazy
            if lazy is None:
                lazy = config.vm.lazy
            if lazy is None:
                lazy = not all([(not th.lazy) for th in thunks])
            if not lazy:
                # there is no conditional in the graph
                if self.allow_gc:
                    vm = LoopGC(nodes, thunks, pre_call_clear,
                                post_thunk_clear)
                else:
                    vm = Loop(nodes, thunks, pre_call_clear)
            else:
                deps = None
                if self.allow_gc:
                    deps = self.compute_gc_dependencies(storage_map)
                vm = Stack(nodes,
                           thunks,
                           pre_call_clear,
                           storage_map,
                           compute_map,
                           self.fgraph,
                           self.allow_gc,
                           dependencies=deps)
        return vm
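make_vm above picks between CVM, Loop, LoopGC and Stack depending on callbacks, profiling, laziness and gc. A hedged sketch of requesting this VM-based linker explicitly when compiling a function:

import theano
import theano.tensor as tensor

x = tensor.vector('x')
# 'vm' selects the VM linker, whose make_vm then chooses CVM/Loop/LoopGC/Stack;
# 'vm_nogc' is the same with garbage collection disabled.
f = theano.function([x], 2 * x, mode=theano.Mode(linker='vm'))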
コード例 #47
ファイル: test_nnet.py プロジェクト: lberrada/Theano
 def test_infer_shape(self):
     fff = theano.function([], outputs=softmax_with_bias(
         numpy.random.rand(3, 4), numpy.random.rand(4)).shape)
     assert all(fff() == [3, 4])
コード例 #48
ファイル: gradient.py プロジェクト: amishtal/Theano
def grad_sources_inputs(sources, graph_inputs, warn_type=True):
    """
    :type sources: list of pairs of Variable: (v, gradient-on-v)
    :param sources: gradients to back-propagate using chain rule
    :type graph_inputs: list of Variable
    :param graph_inputs: variables considered to be constant
        (do not backpropagate through them)

    :rtype: dictionary whose keys and values are of type `Variable`

    :return: mapping from each Variable encountered in the backward
        traversal to the gradient with respect to that Variable.

    It is assumed that there is some objective J shared between all members of
    sources, so that for each v, gradient-on-v is the gradient of J with
    respect to v.
    """
    gmap = {}
    for (r, g_r) in sources:
        if not hasattr(r, 'type'):
            raise TypeError('sources must be Variables', r)
        if g_r is not None:
            if r in gmap:
                gmap[r] = gmap[r] + g_r
            else:
                gmap[r] = g_r

    graph_outputs = gof.utils.uniq([r for r, g in sources])

    if graph_inputs is None:
        graph_inputs = gof.graph.inputs(graph_outputs)

    for node in gof.graph.io_toposort(graph_inputs,
                                      graph_outputs).__reversed__():
        g_outputs = [gmap.get(o, None) for o in node.outputs]

        #if all output gradients are None, continue
        if all(map(lambda x: x is None, g_outputs)): continue

        output_arg = g_outputs
        input_arg = node.inputs

        # Each Op's grad function requires inputs and output_grads
        # If the Op destroys any input, but the grad expression uses it,
        # then chances are the resulting graph will have a dependency
        # cycle.  We avoid this cycle by passing (symbolic) copies of
        # each destroyed input.
        try:
            dinputs = [node.inputs[x[0]] for x in node.op.destroy_map.values()]
        except AttributeError:
            dinputs = []

        new_input_arg = []
        for input in input_arg:
            if input in dinputs and hasattr(input, 'copy'):
                new_input_arg.append(input.copy())
            else:
                new_input_arg.append(input)
        input_arg = new_input_arg

        #note that this function is not in a try-except block
        # the rationale:
        #  If the op implements grad, then any exception should be passed to
        #  the caller
        #  If the op doesn't implement grad, this entire function should fail.
        #  Other possibilities:
        #    * return a partial back-prop
        #
        op_grad = node.op.grad(input_arg, output_arg)
        if not isinstance(op_grad, (list, tuple)):
            raise ValueError(_msg_retType, node.op)
        g_inputs = op_grad
        assert isinstance(g_inputs, (list, tuple))
        if len(g_inputs) != len(node.inputs):
            raise ValueError(_msg_badlen,
                    node.op,
                    len(g_inputs),
                    len(node.inputs))
        for ii, (r, g_r) in enumerate(zip(node.inputs, g_inputs)):
            if warn_type:
                if g_r and (getattr(r, 'type', 0) != getattr(g_r, 'type', 1)):
                    r_type = getattr(r, 'type', None)
                    g_r_type = getattr(g_r, 'type', None)
                    _logger.warning('%s.grad returned a different type (%s) '
                            'for input %i of type (%s)',
                            node.op, g_r_type, ii, r_type)
            if g_r and len(sources) == 1 and sources[0][0].name and r.name:
                g_r.name = "(d%s/d%s)" % (sources[0][0].name, r.name)
            if g_r is not None:
                assert r is not None
                if r in gmap:
                    gmap[r] = gmap[r] + g_r
                else:
                    gmap[r] = g_r
    return gmap
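grad_sources_inputs is the internal chain-rule traversal; user code normally reaches it through theano.grad, roughly as in this hedged sketch:

import numpy
import theano
import theano.tensor as tensor

x = tensor.vector('x')
cost = (x ** 2).sum()                 # scalar objective J
g = theano.grad(cost, x)              # symbolic dJ/dx, here 2 * x
f = theano.function([x], g)
print(f(numpy.asarray([1, 2, 3], dtype=theano.config.floatX)))   # [2. 4. 6.]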
コード例 #49
ファイル: test_nnet.py プロジェクト: lberrada/Theano
 def test_infer_shape(self):
     f = theano.function([], softmax(numpy.random.rand(3, 4)).shape)
     assert all(f() == [3, 4])
コード例 #50
    def with_linker(self,
                    linker,
                    scalar_op=scalar.add,
                    dtype="floatX",
                    test_nan=False,
                    tensor_op=None):
        for xsh, tosum in [((5, 6), None), ((5, 6), (0, 1)), ((5, 6), (0, )),
                           ((5, 6), (1, )), ((5, 6), (-1, )), ((5, 6), (-2, )),
                           ((5, 6), ()), ((2, 3, 4, 5), (0, 1, 3)),
                           ((2, 3, 4, 5), (-2, -3)), ((5, 0), None),
                           ((5, 0), (0, )), ((5, 0), (1, )), ((5, 0), ()),
                           ((), None), ((), ())]:
            if dtype == "floatX":
                dtype = theano.config.floatX
            x = TensorType(dtype, [(entry == 1) for entry in xsh])('x')
            if tensor_op is None:
                e = as_tensor_variable(self.op(scalar_op, axis=tosum)(x))
            else:
                e = as_tensor_variable(tensor_op(x, axis=tosum))

            if tosum is None:
                tosum = range(len(xsh))

            f = copy(linker).accept(FunctionGraph([x], [e])).make_function()
            xv = numpy.asarray(numpy.random.rand(*xsh))

            if "int" not in dtype:
                xv = numpy.asarray(xv, dtype=dtype)
            else:
                xv = numpy.asarray(xv < 0.5, dtype=dtype)

            if test_nan and xv.size > 0:
                if len(xsh) > 0:
                    xv = xv.flatten()
                    xv[0] = numpy.nan
                    xv = xv.reshape(*xsh)
                else:
                    xv = numpy.asarray(numpy.nan, dtype=dtype)
            zv = xv
            numpy_raised = False
            if len(tosum) > 1 and any([a < 0 for a in tosum]):
                #In that case, we need to use the right order of axes
                #in the reduction.
                axis2 = []
                for a in tosum:
                    if a < 0:
                        axis2.append(a + len(xsh))
                    else:
                        axis2.append(a)
                assert len(axis2) == len(tosum)
                tosum = tuple(axis2)
            if tensor_op == tensor.all:
                for axis in reversed(sorted(tosum)):
                    zv = numpy.all(zv, axis)
                if len(tosum) == 0:
                    zv = zv != 0
            elif tensor_op == tensor.any:
                for axis in reversed(sorted(tosum)):
                    zv = numpy.any(zv, axis)
                if len(tosum) == 0:
                    zv = zv != 0
            elif scalar_op == scalar.add:
                for axis in reversed(sorted(tosum)):
                    zv = numpy.add.reduce(zv, axis)
            elif scalar_op == scalar.mul:
                for axis in reversed(sorted(tosum)):
                    zv = numpy.multiply.reduce(zv, axis)
            elif scalar_op == scalar.maximum:
                try:
                    for axis in reversed(sorted(tosum)):
                        zv = numpy.maximum.reduce(zv, axis)
                except ValueError:
                    numpy_raised = True
            elif scalar_op == scalar.minimum:
                try:
                    for axis in reversed(sorted(tosum)):
                        zv = numpy.minimum.reduce(zv, axis)
                except ValueError:
                    numpy_raised = True
            elif scalar_op == scalar.or_:
                for axis in reversed(sorted(tosum)):
                    zv = numpy.bitwise_or.reduce(zv, axis)
            elif scalar_op == scalar.and_:
                for axis in reversed(sorted(tosum)):
                    zv = numpy.bitwise_and.reduce(zv, axis)
            elif scalar_op == scalar.xor:
                # There is no identity value for the xor function,
                # so we can't support dimensions of size 0.
                if numpy.prod(zv.shape) == 0:
                    continue
                for axis in reversed(sorted(tosum)):
                    zv = numpy.bitwise_xor.reduce(zv, axis)
            else:
                raise Exception(
                    "Test for CAReduce with scalar_op %s not implemented" %
                    str(scalar_op))
            if scalar_op in [scalar.maximum, scalar.minimum] and numpy_raised:
                try:
                    out = f(xv)
                    assert out.dtype == dtype
                except ValueError:
                    pass
                else:
                    self.fail()
            else:
                # numpy.{all,any} return bool type,
                # but theano ops return an int8 array instead
                if scalar_op in [scalar.and_, scalar.or_]:
                    zv = numpy.asarray(zv, dtype='int8')
                if test_nan:
                    self.assertTrue(
                        theano.tensor.TensorType.values_eq(f(xv), zv),
                        (f(xv), zv))
                else:
                    f_xv = f(xv)
                    self.assertTrue((f_xv.shape == zv.shape), (f_xv, zv))
                    self.assertTrue(numpy.allclose(f_xv, zv), (f_xv, zv))

            #test CAReduce.infer_shape
            #the Shape op doesn't implement c_code!
            if isinstance(linker, gof.PerformLinker):
                x = TensorType(dtype, [(entry == 1) for entry in xsh])('x')
                if tensor_op is None:
                    e = self.op(scalar_op, axis=tosum)(x)
                else:
                    e = tensor_op(x, axis=tosum)
                if tosum is None:
                    tosum = range(len(xsh))
                f = copy(linker).accept(FunctionGraph(
                    [x], [e.shape])).make_function()
                if not (scalar_op in [scalar.maximum, scalar.minimum] and
                        ((xsh == () or numpy.prod(xsh) == 0))):
                    assert all(f(xv) == zv.shape)
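The with_linker test above mirrors each CAReduce against the corresponding numpy reduction. A hedged, self-contained version of that cross-check for the common sum case:

import numpy
import theano
import theano.tensor as tensor

x = tensor.matrix('x')
f = theano.function([x], tensor.sum(x, axis=-1))   # CAReduce with scalar.add
xv = numpy.random.rand(5, 6).astype(theano.config.floatX)
assert numpy.allclose(f(xv), numpy.add.reduce(xv, axis=-1))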
コード例 #51
                for oy in N.arange(lbound[0], ubound[0], dy):
                    # loop over output image width
                    for ox in N.arange(lbound[1], ubound[1], dx):

                        # kern[l] is filter value to apply at (oj,oi)
                        # for (iy,ix)
                        l = 0

                        # ... ITERATE OVER INPUT UNITS IN RECEPTIVE FIELD
                        for ky in oy + N.arange(kshp[0]):
                            for kx in ox + N.arange(kshp[1]):

                                # check whether we are still within image
                                # boundaries. Equivalent to
                                # zero-padding of the input image
                                if (all((ky, kx) >= topleft)
                                        and all((ky, kx) < botright)):

                                    # convert to "valid" input space
                                    # coords used to determine column
                                    # index to write to in sparse mat
                                    iy, ix = N.array((ky, kx)) - topleft
                                    # determine raster-index of input pixel...

                                    # taking into account multiple
                                    # input features
                                    col = iy * inshp[2] + ix + \
                                          fmapi * N.prod(inshp[1:])

                                    # convert oy,ox values to output
                                    # space coordinates
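The col computation above flattens the (feature map, row, column) coordinate of an input pixel into a single raster index. A hedged numpy check of that formula, using a hypothetical input shape inshp = (features, rows, cols):

import numpy as N

inshp = (3, 5, 7)            # hypothetical (features, rows, cols)
fmapi, iy, ix = 2, 4, 6
col = iy * inshp[2] + ix + fmapi * N.prod(inshp[1:])
assert col == N.ravel_multi_index((fmapi, iy, ix), inshp)   # 104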
コード例 #52
ファイル: test_nnet.py プロジェクト: lberrada/Theano
 def test_infer_shape(self):
     var = self.op(numpy.random.rand(3, 5), numpy.random.rand(5),
                   y_idx=numpy.random.randint(low=0, high=5, size=3))
     assert theano.function([], var[0].shape)() == [3]
     assert all(theano.function([], var[1].shape)() == [3, 5])
     assert theano.function([], var[2].shape)() == [3]
コード例 #53
    def make_vm(self, nodes, thunks, input_storage, output_storage,
                storage_map, post_thunk_clear, computed, compute_map,
                updated_vars):

        pre_call_clear = [storage_map[v] for v in self.no_recycling]

        if self.callback is not None:
            if self.use_cloop:
                logger.warn('CLoop does not support callback, using Stack VM.')
            deps = None
            if self.allow_gc:
                deps = self.compute_gc_dependencies(storage_map)
            vm = Stack(nodes,
                       thunks,
                       pre_call_clear,
                       storage_map,
                       compute_map,
                       self.env,
                       self.allow_gc,
                       dependencies=deps,
                       callback=self.callback)
        elif self.use_cloop:
            # create a map from nodes to ints and vars to ints
            nodes_idx = {}
            vars_idx = {}
            for i, node in enumerate(nodes):
                nodes_idx[node] = i
                for v in node.inputs + node.outputs:
                    vars_idx.setdefault(v, len(vars_idx))
            for v in self.env.inputs + self.env.outputs:
                vars_idx.setdefault(v, len(vars_idx))

            nodes_idx_inv = {}
            vars_idx_inv = {}
            for (node, i) in nodes_idx.items():
                nodes_idx_inv[i] = node
            for (var, i) in vars_idx.items():
                vars_idx_inv[i] = var

            # put storage_map and compute_map into an int-based scheme
            n_applies = len(nodes)
            storage_map_list = [
                storage_map[vars_idx_inv[i]] for i in xrange(len(vars_idx_inv))
            ]
            compute_map_list = [
                compute_map[vars_idx_inv[i]] for i in xrange(len(vars_idx_inv))
            ]
            if nodes:
                assert type(storage_map_list[0]) is list
                assert type(compute_map_list[0]) is list

            if self.allow_gc:
                dependency_map = self.compute_gc_dependencies(storage_map)
                dependency_map_list = [[
                    vars_idx[d] for d in dependency_map[vars_idx_inv[i]]
                ] for i in xrange(len(vars_idx_inv))]
            else:
                dependency_map_list = None

            # build the pointers to node inputs and offsets
            base_input_output_list = []
            node_n_inputs = []
            node_n_outputs = []
            node_input_offset = []
            node_output_offset = []
            for node in nodes:
                inputs_idx = [vars_idx[v] for v in node.inputs]
                outputs_idx = [vars_idx[v] for v in node.outputs]
                node_n_inputs.append(len(inputs_idx))
                node_n_outputs.append(len(outputs_idx))
                node_input_offset.append(len(base_input_output_list))
                base_input_output_list.extend(inputs_idx)
                node_output_offset.append(len(base_input_output_list))
                base_input_output_list.extend(outputs_idx)

            # build the var owner array
            var_owner = [None] * len(vars_idx)
            for (var, i) in vars_idx.items():
                if var.owner:
                    var_owner[i] = nodes_idx[var.owner]

            is_lazy_list = [int(th.lazy) for th in thunks]
            output_vars = [vars_idx[v] for v in self.env.outputs]

            # builds the list of prereqs induced by e.g. destroy_handler
            ords = self.env.orderings()
            node_prereqs = []
            node_output_size = []
            for i, node in enumerate(nodes):
                node_output_size.append(0)
                prereq_var_idxs = []
                for prereq_node in ords.get(node, []):
                    prereq_var_idxs.extend(
                        [vars_idx[v] for v in prereq_node.outputs])
                prereq_var_idxs = list(set(prereq_var_idxs))
                prereq_var_idxs.sort()  # TODO: why sort?
                node_prereqs.append(prereq_var_idxs)

            update_storage = []
            for (ivar, ovar) in updated_vars.items():
                if ivar != ovar:
                    update_storage.append(vars_idx[ivar])  # dst
                    update_storage.append(vars_idx[ovar])  # src

            c0 = sys.getrefcount(node_n_inputs)
            vm = CVM(
                nodes,
                thunks,
                pre_call_clear,
                allow_gc=self.allow_gc,
                call_counts=[0] * len(nodes),
                call_times=[0.0] * len(nodes),
                compute_map_list=compute_map_list,
                storage_map_list=storage_map_list,
                base_input_output_list=base_input_output_list,
                node_n_inputs=node_n_inputs,
                node_n_outputs=node_n_outputs,
                node_input_offset=node_input_offset,
                node_output_offset=node_output_offset,
                var_owner=var_owner,
                is_lazy_list=is_lazy_list,
                output_vars=output_vars,
                node_prereqs=node_prereqs,
                node_output_size=node_output_size,
                update_storage=update_storage,
                dependencies=dependency_map_list,
            )
            assert c0 == sys.getrefcount(node_n_inputs)
        else:
            if all([(not th.lazy) for th in thunks]):
                # there is no conditional in the graph
                if self.allow_gc:
                    vm = LoopGC(nodes, thunks, pre_call_clear,
                                post_thunk_clear)
                else:
                    vm = Loop(nodes, thunks, pre_call_clear)
            else:
                deps = None
                if self.allow_gc:
                    deps = self.compute_gc_dependencies(storage_map)
                vm = Stack(nodes,
                           thunks,
                           pre_call_clear,
                           storage_map,
                           compute_map,
                           self.env,
                           self.allow_gc,
                           dependencies=deps)
        return vm
コード例 #54
 def test_infer_shape(self):
     fff = theano.function([],
                           outputs=softmax_with_bias(
                               numpy.random.rand(3, 4),
                               numpy.random.rand(4)).shape)
     assert all(fff() == [3, 4])
コード例 #55
ファイル: vm.py プロジェクト: bin2000/Theano
    def make_vm(self, nodes, thunks,
            input_storage, output_storage, storage_map,
            post_thunk_clear,
            computed,
            compute_map,
            updated_vars
            ):

        pre_call_clear = [storage_map[v] for v in self.no_recycling]

        if (self.callback is not None or
            (config.profile and config.profile_memory)):

            if self.use_cloop and self.callback is not None:
                logger.warn('CVM does not support callback, using Stack VM.')
            if self.use_cloop and config.profile_memory:
                warnings.warn(
                    'CVM does not support memory profile, using Stack VM.')
            deps = None
            if self.allow_gc:
                deps = self.compute_gc_dependencies(storage_map)
            vm = Stack(
                    nodes, thunks, pre_call_clear,
                    storage_map, compute_map,
                    self.fgraph, self.allow_gc,
                    dependencies=deps,
                    callback=self.callback)
        elif self.use_cloop:
            # create a map from nodes to ints and vars to ints
            nodes_idx = {}
            vars_idx = {}
            for i, node in enumerate(nodes):
                nodes_idx[node] = i
                for v in node.inputs + node.outputs:
                    vars_idx.setdefault(v, len(vars_idx))
            for v in self.fgraph.inputs + self.fgraph.outputs:
                vars_idx.setdefault(v, len(vars_idx))

            nodes_idx_inv = {}
            vars_idx_inv = {}
            for (node, i) in nodes_idx.items():
                nodes_idx_inv[i] = node
            for (var, i) in vars_idx.items():
                vars_idx_inv[i] = var

            # put storage_map and compute_map into an int-based scheme
            n_applies = len(nodes)
            storage_map_list = [storage_map[vars_idx_inv[i]]
                    for i in xrange(len(vars_idx_inv))]
            compute_map_list = [compute_map[vars_idx_inv[i]]
                    for i in xrange(len(vars_idx_inv))]
            if nodes:
                assert type(storage_map_list[0]) is list
                assert type(compute_map_list[0]) is list

            if self.allow_gc:
                dependency_map = self.compute_gc_dependencies(storage_map)
                dependency_map_list = [
                    [vars_idx[d] for d in dependency_map[vars_idx_inv[i]]]
                    for i in xrange(len(vars_idx_inv))]
            else:
                dependency_map_list = None

            # build the pointers to node inputs and offsets
            base_input_output_list = []
            node_n_inputs = []
            node_n_outputs = []
            node_input_offset = []
            node_output_offset = []
            for node in nodes:
                inputs_idx = [vars_idx[v] for v in node.inputs]
                outputs_idx = [vars_idx[v] for v in node.outputs]
                node_n_inputs.append(len(inputs_idx))
                node_n_outputs.append(len(outputs_idx))
                node_input_offset.append(len(base_input_output_list))
                base_input_output_list.extend(inputs_idx)
                node_output_offset.append(len(base_input_output_list))
                base_input_output_list.extend(outputs_idx)

            # build the var owner array
            var_owner = [None] * len(vars_idx)
            for (var, i) in vars_idx.items():
                if var.owner:
                    var_owner[i] = nodes_idx[var.owner]

            is_lazy_list = [int(th.lazy) for th in thunks]
            output_vars = [vars_idx[v] for v in self.fgraph.outputs]

            # builds the list of prereqs induced by e.g. destroy_handler
            ords = self.fgraph.orderings()
            node_prereqs = []
            node_output_size = []
            for i, node in enumerate(nodes):
                node_output_size.append(0)
                prereq_var_idxs = []
                for prereq_node in ords.get(node, []):
                    prereq_var_idxs.extend(
                            [vars_idx[v] for v in prereq_node.outputs])
                prereq_var_idxs = list(set(prereq_var_idxs))
                prereq_var_idxs.sort()  # TODO: why sort?
                node_prereqs.append(prereq_var_idxs)

            # Builds the list of input storage to update (according to update
            # rules) when the outputs are computed.
            # They are in the same order as the second part of output_vars
            # (output_vars contains first the returned outputs, then the
            # values of the update expressions).
            update_storage = []
            update_in_from_out = {}
            for (ivar, ovar) in updated_vars.items():
                update_in_from_out[vars_idx[ovar]] = vars_idx[ivar]
            for oidx in output_vars:
                if oidx in update_in_from_out:
                    update_storage.append(update_in_from_out[oidx])

            c0 = sys.getrefcount(node_n_inputs)
            vm = CVM(
                    nodes,
                    thunks,
                    pre_call_clear,
                    allow_gc=self.allow_gc,
                    call_counts=[0] * len(nodes),
                    call_times=[0.0] * len(nodes),
                    compute_map_list=compute_map_list,
                    storage_map_list=storage_map_list,
                    base_input_output_list=base_input_output_list,
                    node_n_inputs=node_n_inputs,
                    node_n_outputs=node_n_outputs,
                    node_input_offset=node_input_offset,
                    node_output_offset=node_output_offset,
                    var_owner=var_owner,
                    is_lazy_list=is_lazy_list,
                    output_vars=output_vars,
                    node_prereqs=node_prereqs,
                    node_output_size=node_output_size,
                    update_storage=update_storage,
                    dependencies=dependency_map_list,
                    )
            assert c0 == sys.getrefcount(node_n_inputs)
        else:
            lazy = self.lazy
            if lazy is None:
                lazy = config.vm.lazy
            if lazy is None:
                lazy = not all([(not th.lazy) for th in thunks])
            if not lazy:
                # there is no conditional in the graph
                if self.allow_gc:
                    vm = LoopGC(
                            nodes,
                            thunks,
                            pre_call_clear,
                            post_thunk_clear)
                else:
                    vm = Loop(
                            nodes,
                            thunks,
                            pre_call_clear)
            else:
                deps = None
                if self.allow_gc:
                    deps = self.compute_gc_dependencies(storage_map)
                vm = Stack(
                        nodes, thunks, pre_call_clear,
                        storage_map, compute_map,
                        self.fgraph, self.allow_gc,
                        dependencies=deps
                        )
        return vm
コード例 #56
ファイル: vm.py プロジェクト: bin2000/Theano
    def __call__(self):
        storage_map = self.storage_map
        compute_map = self.compute_map
        thunks = self.thunks
        dependencies = self.dependencies
        self.node_executed_order = []
        self.node_cleared_order = []

        for k in self.storage_map:
            compute_map[k][0] = (k.owner is None)

        # apply_stack contains nodes
        apply_stack = list(self.base_apply_stack)
        last_apply_stack_len = -1

        # This records all function inputs/shared variables and constants
        for var, data in self.storage_map.iteritems():
            if data[0] is None:
                continue
            if hasattr(var.type, 'get_shape_info'):
                sh = var.type.get_shape_info(data[0])
            else:
                sh = 'input no shape'
            self.variable_shape[var] = sh
            st = getattr(data[0], 'strides', 'input no strides')
            if getattr(data[0], 'flags', False) and data[0].flags.c_contiguous:
                st = 'c'
            elif (hasattr(data[0], 'is_c_contiguous') and
                  data[0].is_c_contiguous()):
                st = "c"
            self.variable_strides[var] = st

        while apply_stack:
            # Make sure something happened last time round.  This is
            # just a safety check to make sure the op is written
            # correctly apply_stack should either decrease in length
            # by one (a thunk successfully applied), or increase in
            # length (added dependencies over and above the original).
            # NB: this doesn't catch cycles (would be too expensive/slow),
            #     just stalls.
            apply_stack_len = len(apply_stack)
            assert apply_stack_len != last_apply_stack_len
            last_apply_stack_len = apply_stack_len

            current_apply = apply_stack.pop()
            current_inputs = current_apply.inputs
            current_outputs = current_apply.outputs
            current_deps = current_inputs + current_apply.destroy_dependencies

            computed_ins = all(compute_map[v][0] for v in current_deps)
            computed_outs = all(compute_map[v][0] for v in current_outputs)

            if not thunks[self.node_idx[current_apply]].lazy:
                #
                # stack loop: Normal Non-Lazy Case
                # ================================
                #
                # Check if all inputs are in place
                # If so compute thunk and remove it from the apply_stack
                # If not leave it in, and add to the apply_stack those
                # that will produce you those inputs

                if computed_ins and not computed_outs:
                    # -- Non-lazy case: have inputs, time to compute outputs
                    try:
                        _, dt = self.run_thunk_of_node(current_apply)
                        del _
                        if config.profile:
                            current_idx = self.node_idx[current_apply]
                            self.call_counts[current_idx] += 1
                            self.call_times[current_idx] += dt
                            ## Computing the memory footprint of the op
                            # ?? What about inplace .. if the op is inplace
                            # you don't actually ask for more memory!
                            for (idx, o) in enumerate(
                                    thunks[self.node_idx[
                                        current_apply]].outputs):
                                var = self.nodes[current_idx].outputs[idx]
                                if hasattr(var.type, 'get_shape_info'):
                                    sh = var.type.get_shape_info(o[0])
                                else:
                                    sh = 'input no shape'
                                self.variable_shape[var] = sh
                                st = getattr(o[0], 'strides',
                                             'input no strides')
                                if (getattr(o[0], 'flags', False) and
                                    o[0].flags.c_contiguous):
                                    st = 'c'
                                self.variable_strides[var] = st
                    except Exception:
                        raise_with_op(current_apply,
                                      self.thunks[self.node_idx[current_apply]])
                    for o in current_apply.outputs:
                        compute_map[o][0] = 1

                    input_index = []
                    # A list storing the indices of input variables

                    if self.allow_gc:
                        for i in current_apply.inputs:
                            # Garbage Collection -> check if anybody else uses
                            # this input
                            if (dependencies[i]
                                    and i.owner
                                    and i not in self.outputs):
                                if all(compute_map[v][0]
                                        for v in dependencies[i]):
                                    storage_map[i][0] = None
                                    input_index.append(current_apply.inputs.index(i))

                                    #DO NOT set compute_map to 0

                                    #If values become False and the
                                    #current_apply is still in the
                                    #stack, this will cause it to be
                                    #recomputed! This can cause wrong values
                                    #with some combinations of inplace ops.
                                    compute_map[i][0] = 2
                                    if (config.warn.vm_gc_bug and
                                        current_apply in apply_stack and
                                        getattr(current_apply.op,
                                                'destroy_map',
                                                False)):
                                        warnings.warn(
        "There was a bug that existed in the default Theano configuration,"
        " only in the development version between July 5th 2012"
        " and July 30th 2012. This was not in a released version."
        " The bug was affecting this script.",
        #The stack level is not good when inside a Scan.
        stacklevel=3
                                        )
                    self.node_cleared_order.append(input_index)

                elif not computed_ins:
                    # -- Non-lazy case, need inputs
                    apply_stack.append(current_apply)
                    apply_stack.extend(inp.owner
                            for inp in current_deps
                            if inp.owner)


            elif not computed_outs:
                #
                # stack loop: Lazy Evaluation Case
                # ================================
                #
                # Lazy evaluation protocol: run the thunk, which accesses the
                # current storage_map and compute_map via closure, and which
                # returns a list of indices into its input list for the inputs
                # it still requires.

                try:
                    requires, dt = self.run_thunk_of_node(current_apply)
                    current_idx = self.node_idx[current_apply]
                    self.call_counts[current_idx] += 1
                    self.call_times[current_idx] += dt

                except Exception:
                    raise_with_op(current_apply,
                                  self.thunks[self.node_idx[current_apply]])

                if requires:
                    for r in requires:
                        # We are not done with this op, so add it back to the
                        # stack along with the owners of the inputs we are
                        # still missing.
                        apply_stack.append(current_apply)
                        if current_apply.inputs[r].owner:
                            apply_stack.append(current_apply.inputs[r].owner)
                else:
                    if config.profile:
                        for (idx, o) in enumerate(thunks[
                                self.node_idx[current_apply]].outputs):
                            var = self.nodes[
                                self.node_idx[current_apply]].outputs[idx]

                            if hasattr(var.type, 'get_shape_info'):
                                sh = var.type.get_shape_info(o[0])
                            else:
                                sh = 'input no shape'
                            self.variable_shape[var] = sh
                            st = getattr(o[0], 'strides', 'input no strides')
                            if (getattr(o[0], 'flags', False) and
                                o[0].flags.c_contiguous):
                                st = 'c'
                            self.variable_strides[var] = st

                    input_index = []

                    if self.allow_gc:
                        for i in current_apply.inputs:
                            if (dependencies[i] and i.owner and
                                i not in self.outputs):
                                empty_storage_map = True
                                for x in dependencies[i]:
                                    if not compute_map[x][0]:
                                        empty_storage_map = False
                                        break
                                if empty_storage_map:
                                    storage_map[i][0] = None
                                    input_index.append(current_apply.inputs.index(i))
                                    # See the non-lazy GC code above for an
                                    # explanation of this compute_map change.
                                    compute_map[i][0] = 2

                    self.node_cleared_order.append(input_index)

        # Hacky coarse gc final pass
        # This is required until we have a proper gc algorithm for graphs with
        # lazy evaluation. See discussion on theano-dev June 19 2012.
        final_index = []

        if self.allow_gc:
            for v in storage_map:
                if v.owner and v not in self.outputs:
                    if compute_map[v][0] == 2:
                        continue
                    else:
                        storage_map[v][0] = None
                        final_index.append(v)
                        compute_map[v][0] = 2

        self.node_cleared_order.append(final_index)
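
For reference, a minimal toy sketch (not from Theano's source; all names below are hypothetical) of the lazy-thunk protocol described in the comments above: a lazy thunk inspects compute_map and either finishes its computation or returns the indices of the inputs it still needs, and the VM keeps re-running it until it returns an empty list.

from collections import namedtuple

FakeNode = namedtuple('FakeNode', ['inputs', 'outputs'])

def make_lazy_ifelse_thunk(node, storage_map, compute_map):
    # node.inputs = [cond, a, b]; the thunk first asks for the condition,
    # then for only the branch that the condition selects.
    cond, a, b = node.inputs
    out = node.outputs[0]

    def thunk():
        if not compute_map[cond][0]:
            return [0]                      # need input 0 (the condition) first
        branch, idx = (a, 1) if storage_map[cond][0] else (b, 2)
        if not compute_map[branch][0]:
            return [idx]                    # need only the selected branch
        storage_map[out][0] = storage_map[branch][0]
        compute_map[out][0] = 1
        return []                           # empty list means "outputs computed"

    thunk.lazy = True
    return thunk

# Tiny driver mimicking the stack loop above.
cond, a, b, out = 'cond', 'a', 'b', 'out'
node = FakeNode(inputs=[cond, a, b], outputs=[out])
storage_map = {v: [None] for v in (cond, a, b, out)}
compute_map = {v: [0] for v in (cond, a, b, out)}
thunk = make_lazy_ifelse_thunk(node, storage_map, compute_map)
assert thunk() == [0]                       # asks for the condition
storage_map[cond][0] = True; compute_map[cond][0] = 1
assert thunk() == [1]                       # asks for the selected branch only
storage_map[a][0] = 42; compute_map[a][0] = 1
assert thunk() == [] and storage_map[out][0] == 42
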
コード例 #57
    def uniform(self,
                size,
                low=0.0,
                high=1.0,
                ndim=None,
                dtype=None,
                nstreams=None):
        """
        Sample a tensor of the given size whose elements are drawn from a
        uniform distribution between low and high.

        If the size argument is ambiguous on the number of dimensions,
        ndim may be a plain integer to supplement the missing
        information.

        :param low: Lower bound of the interval on which values are sampled.
        If the ``dtype`` arg is provided, ``low`` will be cast into dtype.

        :param high: Higher bound of the interval on which values are sampled.
        If the ``dtype`` arg is provided, ``high`` will be cast into dtype.

        :param size: Can be a list of integers or a Theano variable
                (e.g. the shape of another Theano Variable)

        :param dtype: The output data type. If dtype is not specified, it will
        be inferred from the dtype of low and high, but will be at least as
        precise as floatX.
        """
        low = as_tensor_variable(low)
        high = as_tensor_variable(high)
        if dtype is None:
            dtype = scal.upcast(config.floatX, low.dtype, high.dtype)

        low = cast(low, dtype=dtype)
        high = cast(high, dtype=dtype)

        if isinstance(size, tuple):
            msg = "size must be a tuple of int or a Theano variable"
            assert all([
                isinstance(i, (numpy.integer, int, Variable)) for i in size
            ]), msg
            if any(
                [isinstance(i, (numpy.integer, int)) and i <= 0
                 for i in size]):
                raise ValueError(
                    "The specified size contains a dimension with value <= 0",
                    size)

        else:
            if not (isinstance(size, Variable) and size.ndim == 1):
                raise TypeError("size must be a tuple of int or a Theano "
                                "Variable with 1 dimension, got " + str(size) +
                                " of type " + str(type(size)))

        if nstreams is None:
            nstreams = self.n_streams(size)

        if self.use_cuda and dtype == 'float32':
            rstates = self.get_substream_rstates(nstreams)
            rstates = rstates.flatten()
            # HACK - we use fact that int32 and float32 have same size to
            # sneak ints into the CudaNdarray type.
            # these *SHOULD NEVER BE USED AS FLOATS*
            tmp_float_buf = numpy.frombuffer(rstates.data, dtype='float32')
            assert tmp_float_buf.shape == rstates.shape
            assert (tmp_float_buf.view('int32') == rstates).all()
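            # The same reinterpretation can be reproduced with plain numpy, e.g.:
            #     ints = numpy.arange(4, dtype='int32')
            #     floats = numpy.frombuffer(ints.data, dtype='float32')
            #     assert (floats.view('int32') == ints).all()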
            # transfer to device
            node_rstate = float32_shared_constructor(tmp_float_buf)
            assert isinstance(node_rstate.type, CudaNdarrayType)

            # we can't use the normal mrg_uniform constructor + later
            # optimization
            # because of the tmp_float_buf hack above.  There is
            # currently no Theano node that will do a frombuffer
            # reinterpretation.
            u = self.pretty_return(
                node_rstate,
                *GPU_mrg_uniform.new(node_rstate, ndim, dtype, size))
        else:
            node_rstate = shared(self.get_substream_rstates(nstreams))
            u = self.pretty_return(
                node_rstate, *mrg_uniform.new(node_rstate, ndim, dtype, size))
        r = u * (high - low) + low

        if u.type.broadcastable != r.type.broadcastable:
            raise NotImplementedError(
                'Increase the size to match the broadcasting pattern of '
                '`low` and `high` arguments')

        assert r.dtype == dtype
        return r
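
As a hedged usage sketch, assuming the uniform() method above is the one exposed by MRG_RandomStreams in theano.sandbox.rng_mrg (the class and import path are assumptions, not stated in the snippet), sampling looks roughly like this:

import theano
from theano.sandbox.rng_mrg import MRG_RandomStreams  # assumed location

srng = MRG_RandomStreams(seed=1234)
# Sample a 3x4 tensor with values in [-1, 1); dtype defaults to at least floatX.
u = srng.uniform(size=(3, 4), low=-1.0, high=1.0)
f = theano.function([], u)
samples = f()
assert samples.shape == (3, 4)
assert (samples >= -1.0).all() and (samples < 1.0).all()
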
コード例 #58
    def normal(self,
               size,
               avg=0.0,
               std=1.0,
               ndim=None,
               dtype=None,
               nstreams=None):
        """
        :param size: Can be a list of integers or Theano variables (ex: the
        shape of another Theano Variable)

        :param dtype: The output data type. If dtype is not specified, it will
        be inferred from the dtype of avg and std, but will be at least as
        precise as floatX.

        :param nstreams: Number of streams.
        """
        # We need an even number of ]0,1[ samples. Then we split them
        # in two halves. First half becomes our U1's for Box-Muller,
        # second half our U2's. See Wikipedia page:
        # http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform
        avg = as_tensor_variable(avg)
        std = as_tensor_variable(std)

        if dtype is None:
            dtype = scal.upcast(config.floatX, avg.dtype, std.dtype)

        avg = cast(avg, dtype)
        std = cast(std, dtype)

        evened = False
        constant = False
        if isinstance(size, tuple) and all(
            [isinstance(i, (numpy.integer, int)) for i in size]):
            constant = True
            # Force dtype because it defaults to float when size is empty
            n_samples = numpy.prod(size, dtype='int64')

            if n_samples % 2 == 1:
                n_samples += 1
                evened = True
        else:
            # If prod(size) is even, keep it; if odd, add 1 to make it even.
            n_samples = prod(size) + (prod(size) % 2)
        flattened = self.uniform(size=(n_samples, ),
                                 dtype=dtype,
                                 nstreams=nstreams)

        if constant:
            U1 = flattened[:n_samples // 2]
            U2 = flattened[n_samples // 2:]
        else:
            U1 = flattened[:prod(flattened.shape) // 2]
            U2 = flattened[prod(flattened.shape) // 2:]

        #normal_samples = zeros_like(flattened)
        sqrt_ln_U1 = sqrt(-2.0 * log(U1))
        # TypeError: 'TensorVariable' object does not support item assignment
        # so this doesn't work...
        #normal_samples[:n_samples/2] = sqrt_ln_U1 * cos(2.0*numpy.pi*U2)
        #normal_samples[n_samples/2:] = sqrt_ln_U1 * sin(2.0*numpy.pi*U2)

        # so trying this instead
        first_half = sqrt_ln_U1 * cos(
            numpy.array(2.0 * numpy.pi, dtype=dtype) * U2)
        second_half = sqrt_ln_U1 * sin(
            numpy.array(2.0 * numpy.pi, dtype=dtype) * U2)
        normal_samples = join(0, first_half, second_half)

        final_samples = None
        if evened:
            final_samples = normal_samples[:-1]
        elif constant:
            final_samples = normal_samples
        else:
            final_samples = normal_samples[:prod(size)]

        if not size:
            # Force the dtype to be int64, otherwise reshape complains
            size = tensor.constant(size, dtype='int64')
        final_samples = final_samples.reshape(size)

        final_samples = avg + std * final_samples

        assert final_samples.dtype == dtype
        return final_samples
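
The Box-Muller step above computes z0 = sqrt(-2 ln U1) * cos(2 pi U2) and z1 = sqrt(-2 ln U1) * sin(2 pi U2) from uniforms U1, U2 in (0, 1). A numpy-only sketch (independent of Theano) of the same construction, including the padding to an even sample count used above:

import numpy

rng = numpy.random.RandomState(0)
n_samples = 7
n_even = n_samples + (n_samples % 2)          # pad to an even count, as above
u = rng.uniform(size=n_even)                  # stand-in for the MRG uniform stream
u1, u2 = u[:n_even // 2], u[n_even // 2:]
r = numpy.sqrt(-2.0 * numpy.log(u1))
z = numpy.concatenate([r * numpy.cos(2.0 * numpy.pi * u2),
                       r * numpy.sin(2.0 * numpy.pi * u2)])[:n_samples]
samples = 0.0 + 1.0 * z                       # avg + std * z
assert samples.shape == (n_samples,)
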
コード例 #59
ファイル: gradient.py プロジェクト: jsalvatier/Theano-1
def grad_sources_inputs(sources, graph_inputs, warn_type=True):
    """
    A gradient source is a pair (``v``, ``g_v``), in which ``v`` is
    a `Variable`, and ``g_v`` is a `Variable` that is a gradient wrt
    ``v``. More specifically, ``g_v`` is the gradient of an external
    scalar cost, ``cost`` (that is not explicitly used), wrt ``v``.

    This function traverses the graph backward from the ``r`` sources,
    calling ``op.grad(...)`` for all ops with some non-None gradient
    on an output, to compute gradients of ``cost`` wrt intermediate
    variables and ``graph_inputs``.

    The ``op.grad(...)`` functions are called like this:

    .. code-block:: python

        op.grad(op.inputs[:], [total_gradient(v) for v in op.outputs])

    This call to ``op.grad`` should return a list or tuple: one symbolic
    gradient per input. These gradients represent the gradients of
    the same implicit ``cost`` mentioned above, wrt ``op.inputs``.  Note
    that this is **not** the same as the gradient of ``op.outputs`` wrt
    ``op.inputs``.

    If ``op`` has a single input, then ``op.grad`` should return a list
    or tuple of length 1.
    For each input wrt to which ``op`` is not differentiable, it should
    return ``None`` instead of a `Variable` instance.

    If a source ``r`` receives a gradient from another source ``r2``,
    then the effective gradient on ``r`` is the sum of both gradients.



    :type sources: list of pairs of Variable: (v, gradient-on-v) to
                   initialize the total_gradient dictionary
    :param sources: gradients to back-propagate using chain rule
    :type graph_inputs: list of Variable
    :param graph_inputs: variables considered to be constant
        (do not backpropagate through them)
    :type warn_type: bool
    :param warn_type: True will trigger warnings via the logging module when
       the gradient on an expression has a different type than the original
       expression

    :rtype: dictionary whose keys and values are of type Variable
    :return: mapping from each Variable encountered in the backward
        traversal to the gradient with respect to that Variable.

    It is assumed that there is some objective J shared between all members of
    sources, so that for each v, gradient-on-v is the gradient of J with
    respect to v

    """
    gmap = {}
    for (r, g_r) in sources:
        if not hasattr(r, 'type'):
            raise TypeError('sources must be Variables', r)
        if g_r is not None:
            if r in gmap:
                gmap[r] = gmap[r] + g_r
            else:
                gmap[r] = g_r

    graph_outputs = gof.utils.uniq([r for r, g in sources])

    if graph_inputs is None:
        graph_inputs = gof.graph.inputs(graph_outputs)

    for node in gof.graph.io_toposort(graph_inputs,
                                      graph_outputs).__reversed__():
        g_outputs = [gmap.get(o, None) for o in node.outputs]

        #if all output gradients are None, continue
        if all(map(lambda x: x is None, g_outputs)): continue

        output_arg = g_outputs
        input_arg = node.inputs

        # Each Op's grad function requires inputs and output_grads
        # If the Op destroys any input, but the grad expression uses it,
        # then chances are the resulting graph will have a dependency
        # cycle.  We avoid this cycle by passing (symbolic) copies of
        # each destroyed input.
        try:
            dinputs = [node.inputs[x[0]] for x in node.op.destroy_map.values()]
        except AttributeError:
            dinputs = []

        new_input_arg = []
        for input in input_arg:
            if input in dinputs and hasattr(input, 'copy'):
                new_input_arg.append(input.copy())
            else:
                new_input_arg.append(input)
        input_arg = new_input_arg

        #note that this function is not in a try-except block
        # the rationale:
        #  If the op implements grad, then any exception should be passed to
        #  the caller
        #  If the op doesn't implement grad, this entire function should fail.
        #  Other possibilities:
        #    * return a partial back-prop
        #
        op_grad = node.op.grad(input_arg, output_arg)
        if not isinstance(op_grad, (list, tuple)):
            raise ValueError(_msg_retType, node.op)
        g_inputs = op_grad
        assert isinstance(g_inputs, (list, tuple))
        if len(g_inputs) != len(node.inputs):
            raise ValueError(_msg_badlen, node.op, len(g_inputs),
                             len(node.inputs))
        for ii, (r, g_r) in enumerate(zip(node.inputs, g_inputs)):
            if warn_type:
                if g_r and (getattr(r, 'type', 0) != getattr(g_r, 'type', 1)):
                    r_type = getattr(r, 'type', None)
                    g_r_type = getattr(g_r, 'type', None)
                    _logger.warning(
                        '%s.grad returned a different type (%s) '
                        'for input %i of type (%s)', node.op, g_r_type, ii,
                        r_type)
            if g_r and len(sources) == 1 and sources[0][0].name and r.name:
                g_r.name = "(d%s/d%s)" % (sources[0][0].name, r.name)
            if g_r is not None:
                assert r is not None
                if r in gmap:
                    gmap[r] = gmap[r] + g_r
                else:
                    gmap[r] = g_r
    return gmap
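
A hedged usage sketch of grad_sources_inputs as described in the docstring: seed the backward traversal with the single source pair (cost, dcost/dcost = 1) and read the gradient of an input out of the returned mapping. The import path is guessed from the file name above (gradient.py) and may differ between Theano versions; theano.tensor.grad is the usual public entry point.

import theano
import theano.tensor as tensor
from theano.gradient import grad_sources_inputs  # assumed import path

x = tensor.dvector('x')
cost = (x ** 2).sum()                          # J = sum(x_i ** 2)
one = tensor.as_tensor_variable(1.0)           # dJ/dJ
gmap = grad_sources_inputs([(cost, one)], graph_inputs=None)
g_x = gmap[x]                                  # symbolic dJ/dx, i.e. 2 * x
f = theano.function([x], g_x)
print(f([1.0, 2.0, 3.0]))                      # expected: [2., 4., 6.]
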