Example No. 1
def test_elemwise2():
    """ Several kinds of elemwise expressions with dimension permutations """
    rng = numpy.random.RandomState(int(time.time()))
    print 'random?', rng.rand(3)
    shape = (3,5)
    for pattern in [(0,1), (1,0)]:
        a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),dtype='float32'), name=None)
        b = tensor.Tensor(dtype='float32', broadcastable=[0]*len(shape))()
        f = pfunc([b], [], updates=[(a, (a+b).dimshuffle(pattern))], mode=mode_with_gpu)
        has_elemwise = False
        for i, node in enumerate(f.maker.env.toposort()):
            print >> sys.stdout, i, node
            has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
        assert not has_elemwise
        #let debugmode catch errors
        print >> sys.stdout, 'pattern', pattern
        f(theano._asarray(rng.rand(*shape),dtype='float32')*.3)

    shape = (3,4,5,6)
    a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),dtype='float32'), 'a')
    b = tensor.Tensor(dtype='float32', broadcastable=[0]*len(shape))()
    f = pfunc([b], [], updates=[(a, (a+b).dimshuffle([2,0,3,1]) *
        tensor.exp(b**a).dimshuffle([2,0,3,1]))], mode=mode_with_gpu)
    has_elemwise = False
    for i, node in enumerate(f.maker.env.toposort()):
        print i, node
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    assert not has_elemwise
    #let debugmode catch errors
    f(theano._asarray(rng.rand(*shape),dtype='float32'))
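
As a plain-NumPy reading of the updates above (an illustrative sketch, not part of the test): dimshuffle with a pure permutation pattern reorders axes exactly like numpy.transpose with the same pattern.

import numpy

a2 = numpy.random.rand(3, 5).astype('float32')
b2 = numpy.random.rand(3, 5).astype('float32')
new_a2 = numpy.transpose(a2 + b2, (1, 0))  # (a + b).dimshuffle((1, 0))

a4 = numpy.random.rand(3, 4, 5, 6).astype('float32')
b4 = numpy.random.rand(3, 4, 5, 6).astype('float32')
new_a4 = (numpy.transpose(a4 + b4, (2, 0, 3, 1)) *
          numpy.transpose(numpy.exp(b4 ** a4), (2, 0, 3, 1)))  # second update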
Example No. 2
    def make_node(self, y, y_starts, y_lengths, g_costs, **kwargs):
        y = T.as_tensor_variable(y)
        y_lengths = T.as_tensor_variable(y_lengths)
        y_starts = T.as_tensor_variable(y_starts)
        g_costs = T.as_tensor_variable(g_costs)

        if (y.type.ndim != 3 or y.type.dtype not in T.float_dtypes):
            raise ValueError('y must be 3-d tensor of floats', y.type)

        if (y_lengths.type.ndim != 1
                or y_lengths.type.dtype not in T.discrete_dtypes):
            raise ValueError('y_lengths must be 1-d tensor of integers',
                             y_lengths.type)

        if (y_starts.type.ndim != 1
                or y_starts.type.dtype not in T.discrete_dtypes):
            raise ValueError('y_starts must be 1-d tensor of integers',
                             y_starts.type)

        if (g_costs.type.ndim != 1
                or g_costs.type.dtype not in T.float_dtypes):
            raise ValueError('g_costs must be 1-d tensor of floats',
                             g_costs.type)

        return Apply(
            self, [y, y_starts, y_lengths, g_costs],
            [T.Tensor(dtype=y.dtype, broadcastable=y.type.broadcastable)()])
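
The make_node above only builds the symbolic Apply node; a complete Theano Op also needs a perform method (or a C implementation) that does the numeric work at run time. A minimal skeleton showing where such a make_node sits (the class name and the placeholder perform body are assumptions for illustration, not the original Op):

import numpy
import theano.tensor as T
from theano.gof import Apply, Op


class ExampleCostGradOp(Op):  # hypothetical name
    __props__ = ()

    def make_node(self, y, y_starts, y_lengths, g_costs, **kwargs):
        y = T.as_tensor_variable(y)
        y_starts = T.as_tensor_variable(y_starts)
        y_lengths = T.as_tensor_variable(y_lengths)
        g_costs = T.as_tensor_variable(g_costs)
        # ... same dtype/ndim validation as above ...
        return Apply(self, [y, y_starts, y_lengths, g_costs],
                     [T.Tensor(dtype=y.dtype,
                               broadcastable=y.type.broadcastable)()])

    def perform(self, node, inputs, output_storage):
        y, y_starts, y_lengths, g_costs = inputs
        # placeholder: a real Op would compute the actual gradient here
        output_storage[0][0] = numpy.zeros_like(y)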
Example No. 3
    def make_node(self, softmaxes, y_idxes, y_lengths, y_startidxes, **kwargs):
        softmaxes = T.as_tensor_variable(softmaxes)
        y_idxes = T.as_tensor_variable(y_idxes)
        y_lengths = T.as_tensor_variable(y_lengths)
        y_startidxes = T.as_tensor_variable(y_startidxes)
        if (softmaxes.type.ndim != 3
                or softmaxes.type.dtype not in T.float_dtypes):
            raise ValueError('softmaxes must be 3-d tensor of floats',
                             softmaxes.type)

        if (y_idxes.type.ndim != 2
                or y_idxes.type.dtype not in T.discrete_dtypes):
            raise ValueError('y_idxes must be 2-d tensor of integers',
                             y_idxes.type)

        if (y_lengths.type.ndim != 1
                or y_lengths.type.dtype not in T.discrete_dtypes):
            raise ValueError('y_lengths must be 1-d tensor of integers',
                             y_lengths.type)

        if (y_startidxes.type.ndim != 1
                or y_startidxes.type.dtype not in T.discrete_dtypes):
            raise ValueError('y_startidxes must be 1-d tensor of integers',
                             y_startidxes.type)

        return Apply(
            self, [softmaxes, y_idxes, y_lengths, y_startidxes],
            [T.Tensor(dtype=softmaxes.dtype, broadcastable=[False])()])
Example No. 4
def run_conv_nnet1(use_gpu):
    if use_gpu:
        shared_fn = tcn.shared_constructor
    else:
        shared_fn = shared
    n_batch = 16
    n_kern = 20
    shape_img = (n_batch, 1, 32, 32)
    shape_kern = (n_kern, 1, 5, 5)
    n_train = 10
    if config.mode == 'DEBUG_MODE':
        n_train = 1

    logical_hid_shape = tcn.blas.GpuConv.logical_output_shape_2d(
        shape_img[2:], shape_kern[2:], 'valid')
    n_hid = n_kern * logical_hid_shape[0] * logical_hid_shape[1]
    n_out = 10

    w = shared_fn(0.01 * (my_rand(*shape_kern) - 0.5), 'w')
    b = shared_fn(my_zeros((n_kern,)), 'b')
    v = shared_fn(my_zeros((n_hid, n_out)), 'v')
    c = shared_fn(my_zeros(n_out), 'c')

    x = tensor.Tensor(dtype='float32', broadcastable=(0, 1, 0, 0))('x')
    y = tensor.fmatrix('y')
    lr = tensor.fscalar('lr')

    conv_op = conv.ConvOp(shape_img[2:], shape_kern[2:], n_kern, n_batch, 1, 1)

    hid = tensor.tanh(conv_op(x, w) + b.dimshuffle((0, 'x', 'x')))
    hid_flat = hid.reshape((n_batch, n_hid))
    out = tensor.tanh(tensor.dot(hid_flat, v) + c)
    loss = tensor.sum(0.5 * (out - y) ** 2 * lr)
    # print 'loss type', loss.type

    params = [w, b, v, c]
    gparams = tensor.grad(loss, params)

    mode = get_mode(use_gpu)

    # print 'building pfunc ...'
    train = pfunc(
        [x, y, lr],
        [loss],
        mode=mode,
        updates=[(p, p - g) for p, g in zip(params, gparams)])

#    for i, n in enumerate(train.maker.fgraph.toposort()):
#        print i, n

    xval = my_rand(*shape_img)
    yval = my_rand(n_batch, n_out)
    lr = theano._asarray(0.01, dtype='float32')

    for i in xrange(n_train):
        rval = train(xval, yval, lr)
    # print 'training done'
    print_mode(mode)
    return rval
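
my_rand, my_zeros, and my_randn are helpers defined elsewhere in the test module. Based on how they are used (float32 values for shared variables), they are assumed to look roughly like the sketch below; these are not the original definitions.

import numpy
import theano


def my_rand(*shape):
    return theano._asarray(numpy.random.rand(*shape), dtype='float32')


def my_randn(*shape):
    return theano._asarray(numpy.random.randn(*shape), dtype='float32')


def my_zeros(shape):
    return theano._asarray(numpy.zeros(shape), dtype='float32')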
Example No. 5
    def test_opt_unpack(self):
        #
        # Test that a graph involving
        # structured_dot(assembled_csc_matrix) is optimized to be just
        # a structured_dot_csc Op and no assembly of a csc_matrix.
        #
        # The optimization from structured_dot -> structured_dot_csc
        # is currently disabled, so this test is not expected to pass.

        return
        #
        kerns = tensor.Tensor(dtype='int64', broadcastable=[False])('kerns')
        spmat = sp.lil_matrix((4, 6), dtype='int64')
        for i in range(5):
            # set non-zeros in random locations (row x, col y)
            x = int(numpy.floor(numpy.random.rand() * spmat.shape[0]))
            y = int(numpy.floor(numpy.random.rand() * spmat.shape[1]))
            spmat[x, y] = numpy.random.rand() * 10
        spmat = sp.csc_matrix(spmat)

        images = tensor.Tensor(dtype='float32',
                               broadcastable=[False, False])('images')

        cscmat = CSC(kerns, spmat.indices[:spmat.size], spmat.indptr,
                     spmat.shape)
        f = theano.function([kerns, images], structured_dot(cscmat, images.T))

        sdcscpresent = False
        for node in f.maker.env.toposort():
            print node.op
            assert not isinstance(node.op, CSM)
            assert not isinstance(node.op, CSMProperties)
            if isinstance(node.op, StructuredDotCSC):
                sdcscpresent = True
        assert sdcscpresent

        kernvals = numpy.array(spmat.data[:spmat.size])
        #print 'kdtype', kernvals.dtype, kernvals.shape,
        #print kernvals.ndim, kernvals.dtype.num
        #print 'type of kernvals = ', kernvals.dtype
        bsize = 3
        imvals = 1.0 * numpy.array(numpy.arange(bsize * spmat.shape[1]).\
                reshape(bsize, spmat.shape[1]), dtype='float32')
        outvals = f(kernvals, imvals)
        print outvals
Example No. 6
def cmp_sigmoids(shape):
    def numpy_sigmoid(input):
        rval = 1.0 / (1.0 + numpy.exp(-input))
        return rval
    sinput = tensor.Tensor(dtype='float32', broadcastable=(0,)*len(shape))()
    shared_input = tcn.shared_constructor(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'),
        'shared_input')
    times = compare_fns(
            dict( numpy=numpy_sigmoid
                , theano_cpu=pfunc([sinput], 1.0 / (1.0 + tensor.exp(-sinput)))
                , theano_gpu_onboard=pfunc([sinput], [], updates=[(shared_input, 1.0 / (1.0 + tensor.exp(-shared_input)))])
                ),
            input=shared_input.value)
    showtimes(times)
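
compare_fns and showtimes are benchmarking helpers that are not shown in this snippet. Below is a rough sketch of what they are assumed to do (time each candidate on the same input and print the results); the signatures here are guesses, not the originals.

import time


def compare_fns(fns, input, n_calls=100):
    times = {}
    for name, fn in fns.items():
        t0 = time.time()
        for _ in range(n_calls):
            fn(input)
        times[name] = (time.time() - t0) / n_calls
    return times


def showtimes(times):
    for name, avg in sorted(times.items()):
        print('%-20s %.6f s/call' % (name, avg))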
Example No. 7
    def make_node(self, coding_dist, true_one_of_n):
        _coding_dist = T.as_tensor_variable(coding_dist)
        _true_one_of_n = T.as_tensor_variable(true_one_of_n)
        if _coding_dist.type.ndim != 2:
            raise TypeError('matrix required for argument: coding_dist')
        if _true_one_of_n.type not in (T.lvector, T.ivector):
            raise TypeError(
                'integer vector required for argument: true_one_of_n '
                '(got type: %s instead of: %s)' % (_true_one_of_n.type,
                                                   T.lvector))
        return gof.Apply(self, [_coding_dist, _true_one_of_n],
                         [T.Tensor(dtype=_coding_dist.dtype,
                                   broadcastable=[False])()])
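
This make_node matches Theano's categorical one-hot cross-entropy Op; in practice it is reached through tensor.nnet.crossentropy_categorical_1hot (used in Example No. 8 below). A small usage sketch with made-up data:

import numpy
import theano
import theano.tensor as T

coding = T.fmatrix('coding')    # each row is an output distribution
targets = T.ivector('targets')  # one integer class index per row
xent = T.nnet.crossentropy_categorical_1hot(coding, targets)
f = theano.function([coding, targets], xent)

probs = numpy.asarray([[0.7, 0.2, 0.1],
                       [0.1, 0.8, 0.1]], dtype='float32')
idx = numpy.asarray([0, 1], dtype='int32')
print(f(probs, idx))  # -log(probability assigned to the true class), per row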
Example No. 8
def build_conv_nnet2_classif(use_gpu, isize, ksize, n_batch,
                             downsample_ops=True, verbose=0, version=-1,
                             check_isfinite=True):
    if use_gpu:
        shared_fn = tcn.shared_constructor
    else:
        shared_fn = shared

    isize1 = isize
    isize2 = isize
    if isinstance(isize, (tuple, )):
        isize1 = isize[0]
        isize2 = isize[1]

    shape_img = (n_batch, 1, isize1, isize2)

    n_kern = 20  # 6 were used in LeNet5
    shape_kern = (n_kern, 1, ksize, ksize)

    n_kern1 = 30  # 16 were used in LeNet5
    shape_kern1 = (n_kern1, n_kern, ksize, ksize)

    logical_hid_shape = tcn.blas.GpuConv.logical_output_shape_2d(
        (isize1, isize2), (ksize, ksize), 'valid')
    logical_hid_shape1 = tcn.blas.GpuConv.logical_output_shape_2d(
        (logical_hid_shape[0] // 2, logical_hid_shape[1] // 2),
        (ksize, ksize), 'valid')
    n_hid = n_kern1 * logical_hid_shape1[0] * logical_hid_shape1[1]
    n_out = 10

    w0 = shared_fn(0.01 * (my_rand(*shape_kern) - 0.5), 'w0')
    b0 = shared_fn(my_zeros((n_kern,)), 'b0')
    w1 = shared_fn(0.01 * (my_rand(*shape_kern1) - 0.5), 'w1')
    b1 = shared_fn(my_zeros((n_kern1,)), 'b1')
    v = shared_fn(0.01 * my_randn(n_hid, n_out), 'v')
    c = shared_fn(my_zeros(n_out), 'c')

    # print 'ALLOCATING ARCH: w0 shape', w0.get_value(borrow=True).shape
    # print 'ALLOCATING ARCH: w1 shape', w1.get_value(borrow=True).shape
    # print 'ALLOCATING ARCH: v shape', v.get_value(borrow=True).shape

    x = tensor.Tensor(dtype='float32', broadcastable=(0, 1, 0, 0))('x')
    y = tensor.fmatrix('y')
    lr = tensor.fscalar('lr')

    conv_op = conv.ConvOp(shape_img[2:], shape_kern[2:], n_kern,
                          n_batch, 1, 1, verbose=verbose, version=version)
    conv_op1 = conv.ConvOp(
        (n_kern, logical_hid_shape[0] // 2, logical_hid_shape[1] // 2),
        shape_kern1[2:], n_kern1, n_batch, 1, 1, verbose=verbose, version=version)

    ds_op = pool.Pool((2, 2), ignore_border=False)
    if downsample_ops:
        hid = tensor.tanh(ds_op(conv_op(x, w0) + b0.dimshuffle((0, 'x', 'x'))))
    else:
        hid = tensor.tanh(
            (conv_op(x, w0) + b0.dimshuffle(
                (0, 'x', 'x')))[:, :, ::2, ::2])
    hid1 = tensor.tanh(conv_op1(hid, w1) + b1.dimshuffle((0, 'x', 'x')))
    hid_flat = hid1.reshape((n_batch, n_hid))
    out = tensor.nnet.softmax(tensor.dot(hid_flat, v) + c)
    loss = tensor.sum(tensor.nnet.crossentropy_categorical_1hot(
        out, tensor.argmax(y, axis=1)) * lr)
    # print 'loss type', loss.type

    params = [w0, b0, w1, b1, v, c]
    gparams = tensor.grad(loss, params)

    mode = get_mode(use_gpu, check_isfinite)

    # print 'building pfunc ...'
    train = pfunc(
        [x, y, lr],
        [loss],
        mode=mode,
        updates=[(p, p - g) for p, g in zip(params, gparams)])

    if verbose:
        theano.printing.debugprint(train)
    if use_gpu:
        # Check that GpuConv is used
        topo = train.maker.fgraph.toposort()
        conv_ops = (tcn.blas.GpuConv,
                    tcn.dnn.GpuDnnConv,
                    tcn.dnn.GpuDnnConvGradI,
                    tcn.dnn.GpuDnnConvGradW,
                    tcn.blas.BaseGpuCorrMM)

        assert len([n for n in topo if isinstance(n.op, conv_ops)]) > 0

    shape_target = (n_batch, n_out)
    return train, params, shape_img, shape_target, mode
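
The returned tuple gives the compiled train function plus the shapes it expects; a caller can drive it the same way Example No. 9 drives its own train function. A small sketch with made-up data (my_rand is the assumed float32 helper sketched after Example No. 4):

train, params, shape_img, shape_target, mode = build_conv_nnet2_classif(
    use_gpu=True, isize=32, ksize=5, n_batch=60)

xval = my_rand(*shape_img)
yval = my_rand(*shape_target)
lr = theano._asarray(0.01, dtype='float32')

for i in range(10):
    loss, = train(xval, yval, lr)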
Example No. 9
def run_conv_nnet2(use_gpu):  # pretend we are training LeNet for MNIST
    if use_gpu:
        shared_fn = tcn.shared_constructor
    else:
        shared_fn = shared

    # Cumulative rounding error affects this comparison of results, so we
    # lower the tolerance.
    # TODO: why do the last two examples show a lower error? Are we converging?
    # n_train=10, n_batch=3, n_kern=1, n_kern1=1, error seen: 1e-9
    # n_train=10, n_batch=3, n_kern=10, n_kern1=1, error seen: -1.27777e-06
    # n_train=10, n_batch=3, n_kern=10, n_kern1=10, error seen: -6.91377e-05
    # n_train=10, n_batch=30, n_kern=10, n_kern1=10, error seen: -0.00185963
    # n_train=10, n_batch=60, n_kern=10, n_kern1=10, error seen: -5.26905e-05
    # n_train=30, n_batch=60, n_kern=10, n_kern1=10, error seen: -3.8147e-06

    # n_train=30, n_batch=60, n_kern=20, n_kern1=10, error seen: 6.82771e-05
    # n_train=30, n_batch=60, n_kern=20, n_kern1=30, error seen: 0.000231534
    n_batch = 60
    shape_img = (n_batch, 1, 32, 32)

    n_kern = 20
    shape_kern = (n_kern, 1, 5, 5)

    n_kern1 = 10
    shape_kern1 = (n_kern1, n_kern, 5, 5)

    n_train = 30
    if config.mode == 'DEBUG_MODE':
        n_train = 1

    logical_hid_shape = tcn.blas.GpuConv.logical_output_shape_2d(tuple(
        shape_img[2:]), tuple(shape_kern[2:]), 'valid')
    logical_hid_shape1 = tcn.blas.GpuConv.logical_output_shape_2d(
        (logical_hid_shape[0] // 2, logical_hid_shape[1] // 2),
        tuple(shape_kern1[2:]), 'valid')
    n_hid = n_kern1 * logical_hid_shape1[0] * logical_hid_shape1[1]
    n_out = 10

    w0 = shared_fn(0.01 * (my_rand(*shape_kern) - 0.5), 'w0')
    b0 = shared_fn(my_zeros((n_kern,)), 'b0')
    w1 = shared_fn(0.01 * (my_rand(*shape_kern1) - 0.5), 'w1')
    b1 = shared_fn(my_zeros((n_kern1,)), 'b1')
    v = shared_fn(my_zeros((n_hid, n_out)), 'v')
    c = shared_fn(my_zeros(n_out), 'c')

    x = tensor.Tensor(dtype='float32', broadcastable=(0, 1, 0, 0))('x')
    y = tensor.fmatrix('y')
    lr = tensor.fscalar('lr')

    conv_op = conv.ConvOp(shape_img[2:], shape_kern[2:], n_kern, n_batch, 1, 1)
    conv_op1 = conv.ConvOp((n_kern, logical_hid_shape[0] // 2,
                            logical_hid_shape[1] // 2),
                           shape_kern1[2:],
                           n_kern1, n_batch, 1, 1)

    hid = tensor.tanh(conv_op(x, w0) + b0.dimshuffle((0, 'x', 'x')))
    hid1 = tensor.tanh(conv_op1(hid[:, :, ::2, ::2], w1) + b1.dimshuffle((
        0, 'x', 'x')))
    hid_flat = hid1.reshape((n_batch, n_hid))
    out = tensor.tanh(tensor.dot(hid_flat, v) + c)
    loss = tensor.sum(0.5 * (out - y) ** 2 * lr)
    # print 'loss type', loss.type

    params = [w0, b0, w1, b1, v, c]
    gparams = tensor.grad(loss, params)

    mode = get_mode(use_gpu)

    # print 'building pfunc ...'
    train = pfunc(
        [x, y, lr],
        [loss],
        mode=mode,
        updates=[(p, p - g) for p, g in zip(params, gparams)])

#    for i, n in enumerate(train.maker.fgraph.toposort()):
#        print i, n

    xval = my_rand(*shape_img)
    yval = my_rand(n_batch, n_out)  # using int32 would make everything 0
    lr = theano._asarray(0.01, dtype='float32')
    for i in xrange(n_train):
        rval = train(xval, yval, lr)

    print_mode(mode)
    return rval